Use Query instances as iterables when possible

A Query.all() call creates a list of DB query results.
We can actually use a Query instance as an iterable and
thus avoid creation of a list if we just need to iterate
over those values only once.

Fixes bug 1173133

Change-Id: I06d8eaa9eaba4c71aa0c98000735cb9d83ea19a3
This commit is contained in:
Roman Podolyaka 2013-04-25 16:15:59 +03:00
parent 6672769026
commit c7afedad00
19 changed files with 116 additions and 108 deletions

View File

@ -62,6 +62,19 @@ class AgentSchedulerDbMixin(agentscheduler.AgentSchedulerPluginBase,
network_scheduler = None
router_scheduler = None
@staticmethod
# Decide whether an agent is eligible for scheduling given the 'active'
# filter: active=None disables activeness filtering altogether, so every
# agent qualifies; any non-None value restricts eligibility to agents
# that are currently 'up' (recent heartbeat).
def is_eligible_agent(active, agent):
if active is None:
# filtering by activeness is disabled, all agents are eligible
return True
else:
# note(rpodolyaka): original behaviour is saved here: if active
# filter is set, only agents which are 'up'
# (i.e. have a recent heartbeat timestamp)
# are eligible, even if active is False
return not agents_db.AgentDbMixin.is_agent_down(
agent['heartbeat_timestamp'])
def get_dhcp_agents_hosting_networks(
self, context, network_ids, active=None):
if not network_ids:
@ -76,13 +89,11 @@ class AgentSchedulerDbMixin(agentscheduler.AgentSchedulerPluginBase,
NetworkDhcpAgentBinding.network_id in network_ids)
if active is not None:
query = (query.filter(agents_db.Agent.admin_state_up == active))
dhcp_agents = [binding.dhcp_agent for binding in query.all()]
if active is not None:
dhcp_agents = [dhcp_agent for dhcp_agent in
dhcp_agents if not
agents_db.AgentDbMixin.is_agent_down(
dhcp_agent['heartbeat_timestamp'])]
return dhcp_agents
return [binding.dhcp_agent
for binding in query
if AgentSchedulerDbMixin.is_eligible_agent(active,
binding.dhcp_agent)]
def add_network_to_dhcp_agent(self, context, id, network_id):
self._get_network(context, network_id)
@ -123,12 +134,12 @@ class AgentSchedulerDbMixin(agentscheduler.AgentSchedulerPluginBase,
def list_networks_on_dhcp_agent(self, context, id):
query = context.session.query(NetworkDhcpAgentBinding.network_id)
net_ids = query.filter(
NetworkDhcpAgentBinding.dhcp_agent_id == id).all()
query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id)
net_ids = [item[0] for item in query]
if net_ids:
_ids = [item[0] for item in net_ids]
return {'networks':
self.get_networks(context, filters={'id': _ids})}
self.get_networks(context, filters={'id': net_ids})}
else:
return {'networks': []}
@ -138,12 +149,14 @@ class AgentSchedulerDbMixin(agentscheduler.AgentSchedulerPluginBase,
if not agent.admin_state_up:
return []
query = context.session.query(NetworkDhcpAgentBinding.network_id)
net_ids = query.filter(
NetworkDhcpAgentBinding.dhcp_agent_id == agent.id).all()
query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)
net_ids = [item[0] for item in query]
if net_ids:
_ids = [item[0] for item in net_ids]
return self.get_networks(
context, filters={'id': _ids, 'admin_state_up': [True]})
context,
filters={'id': net_ids, 'admin_state_up': [True]}
)
else:
return []
@ -214,12 +227,12 @@ class AgentSchedulerDbMixin(agentscheduler.AgentSchedulerPluginBase,
def list_routers_on_l3_agent(self, context, id):
query = context.session.query(RouterL3AgentBinding.router_id)
router_ids = query.filter(
RouterL3AgentBinding.l3_agent_id == id).all()
query = query.filter(RouterL3AgentBinding.l3_agent_id == id)
router_ids = [item[0] for item in query]
if router_ids:
_ids = [item[0] for item in router_ids]
return {'routers':
self.get_routers(context, filters={'id': _ids})}
self.get_routers(context, filters={'id': router_ids})}
else:
return {'routers': []}
@ -234,13 +247,13 @@ class AgentSchedulerDbMixin(agentscheduler.AgentSchedulerPluginBase,
RouterL3AgentBinding.l3_agent_id == agent.id)
if router_id:
query = query.filter(RouterL3AgentBinding.router_id == router_id)
router_ids = query.all()
router_ids = [item[0] for item in query]
if router_ids:
_ids = [item[0] for item in router_ids]
routers = self.get_sync_data(context, router_ids=_ids,
active=True)
return routers
return []
return self.get_sync_data(context, router_ids=router_ids,
active=True)
else:
return []
def get_l3_agents_hosting_routers(self, context, router_ids,
admin_state_up=None,
@ -257,7 +270,7 @@ class AgentSchedulerDbMixin(agentscheduler.AgentSchedulerPluginBase,
if admin_state_up is not None:
query = (query.filter(agents_db.Agent.admin_state_up ==
admin_state_up))
l3_agents = [binding.l3_agent for binding in query.all()]
l3_agents = [binding.l3_agent for binding in query]
if active is not None:
l3_agents = [l3_agent for l3_agent in
l3_agents if not
@ -313,13 +326,10 @@ class AgentSchedulerDbMixin(agentscheduler.AgentSchedulerPluginBase,
column = getattr(agents_db.Agent, key, None)
if column:
query = query.filter(column.in_(value))
l3_agents = query.all()
if active is not None:
l3_agents = [l3_agent for l3_agent in
l3_agents if not
agents_db.AgentDbMixin.is_agent_down(
l3_agent['heartbeat_timestamp'])]
return l3_agents
return [l3_agent
for l3_agent in query
if AgentSchedulerDbMixin.is_eligible_agent(active, l3_agent)]
def get_l3_agent_candidates(self, sync_router, l3_agents):
"""Get the valid l3 agents for the router from a list of l3_agents."""

View File

@ -237,7 +237,7 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [dict_func(c, fields) for c in query.all()]
items = [dict_func(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items
@ -319,7 +319,7 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
expired_qry = expired_qry.filter(
models_v2.IPAllocation.expiration <= timeutils.utcnow())
for expired in expired_qry.all():
for expired in expired_qry:
QuantumDbPluginV2._recycle_ip(context,
network_id,
expired['subnet_id'],
@ -338,7 +338,7 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
# Grab all allocation pools for the subnet
pool_qry = context.session.query(
models_v2.IPAllocationPool).with_lockmode('update')
allocation_pools = pool_qry.filter_by(subnet_id=subnet_id).all()
allocation_pools = pool_qry.filter_by(subnet_id=subnet_id)
# Find the allocation pool for the IP to recycle
pool_id = None
for allocation_pool in allocation_pools:
@ -494,7 +494,7 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
models_v2.IPAvailabilityRange,
models_v2.IPAllocationPool).join(
models_v2.IPAllocationPool).with_lockmode('update')
results = range_qry.filter_by(subnet_id=subnet_id).all()
results = range_qry.filter_by(subnet_id=subnet_id)
for (range, pool) in results:
first = int(netaddr.IPAddress(range['first_ip']))
last = int(netaddr.IPAddress(range['last_ip']))
@ -560,7 +560,7 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
# Check if the requested IP is in a defined allocation pool
pool_qry = context.session.query(models_v2.IPAllocationPool)
allocation_pools = pool_qry.filter_by(subnet_id=subnet_id).all()
allocation_pools = pool_qry.filter_by(subnet_id=subnet_id)
ip = netaddr.IPAddress(ip_address)
for allocation_pool in allocation_pools:
allocation_pool_range = netaddr.IPRange(
@ -872,10 +872,10 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
return
ports = self._model_query(
context, models_v2.Port).filter(
models_v2.Port.network_id == id).all()
models_v2.Port.network_id == id)
subnets = self._model_query(
context, models_v2.Subnet).filter(
models_v2.Subnet.network_id == id).all()
models_v2.Subnet.network_id == id)
tenant_ids = set([port['tenant_id'] for port in ports] +
[subnet['tenant_id'] for subnet in subnets])
# raise if multiple tenants found or if the only tenant found
@ -1238,7 +1238,7 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
# Check if any tenant owned ports are using this subnet
allocated_qry = context.session.query(models_v2.IPAllocation)
allocated_qry = allocated_qry.options(orm.joinedload('ports'))
allocated = allocated_qry.filter_by(subnet_id=id).all()
allocated = allocated_qry.filter_by(subnet_id=id)
only_auto_del = all(not a.port_id or
a.ports.device_owner in AUTO_DELETE_PORT_OWNERS
@ -1247,8 +1247,7 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
raise q_exc.SubnetInUse(subnet_id=id)
# remove network owned ports
for allocation in allocated:
context.session.delete(allocation)
allocated.delete()
context.session.delete(subnet)
@ -1382,30 +1381,29 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
allocated_qry = context.session.query(
models_v2.IPAllocation).with_lockmode('update')
# recycle all of the IP's
allocated = allocated_qry.filter_by(port_id=id).all()
if allocated:
for a in allocated:
subnet = self._get_subnet(context, a['subnet_id'])
# Check if IP was allocated from allocation pool
if QuantumDbPluginV2._check_ip_in_allocation_pool(
context, a['subnet_id'], subnet['gateway_ip'],
a['ip_address']):
QuantumDbPluginV2._hold_ip(context,
a['network_id'],
a['subnet_id'],
id,
a['ip_address'])
else:
# IPs out of allocation pool will not be recycled, but
# we do need to delete the allocation from the DB
QuantumDbPluginV2._delete_ip_allocation(
context, a['network_id'],
a['subnet_id'], a['ip_address'])
msg_dict = {'address': a['ip_address'],
'subnet_id': a['subnet_id']}
msg = _("%(address)s (%(subnet_id)s) is not "
"recycled") % msg_dict
LOG.debug(msg)
allocated = allocated_qry.filter_by(port_id=id)
for a in allocated:
subnet = self._get_subnet(context, a['subnet_id'])
# Check if IP was allocated from allocation pool
if QuantumDbPluginV2._check_ip_in_allocation_pool(
context, a['subnet_id'], subnet['gateway_ip'],
a['ip_address']):
QuantumDbPluginV2._hold_ip(context,
a['network_id'],
a['subnet_id'],
id,
a['ip_address'])
else:
# IPs out of allocation pool will not be recycled, but
# we do need to delete the allocation from the DB
QuantumDbPluginV2._delete_ip_allocation(
context, a['network_id'],
a['subnet_id'], a['ip_address'])
msg_dict = {'address': a['ip_address'],
'subnet_id': a['subnet_id']}
msg = _("%(address)s (%(subnet_id)s) is not "
"recycled") % msg_dict
LOG.debug(msg)
context.session.delete(port)
@ -1448,7 +1446,7 @@ class QuantumDbPluginV2(quantum_plugin_base_v2.QuantumPluginBaseV2):
sorts=sorts, limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
items = [self._make_port_dict(c, fields) for c in query.all()]
items = [self._make_port_dict(c, fields) for c in query]
if limit and page_reverse:
items.reverse()
return items

View File

@ -131,8 +131,7 @@ class ExtraRoute_db_mixin(l3_db.L3_NAT_db_mixin):
def _get_extra_routes_by_router_id(self, context, id):
query = context.session.query(RouterRoute)
query.filter(RouterRoute.router_id == id)
extra_routes = query.all()
return self._make_extra_route_list(extra_routes)
return self._make_extra_route_list(query)
def get_router(self, context, id, fields=None):
with context.session.begin(subtransactions=True):

View File

@ -285,8 +285,7 @@ class L3_NAT_db_mixin(l3.RouterPluginBase):
network_id, subnet_id, subnet_cidr):
try:
rport_qry = context.session.query(models_v2.Port)
rports = rport_qry.filter_by(
device_id=router_id).all()
rports = rport_qry.filter_by(device_id=router_id)
# it's possible these ports are on the same network, but
# a different subnet
new_ipnet = netaddr.IPNetwork(subnet_cidr)
@ -427,7 +426,7 @@ class L3_NAT_db_mixin(l3.RouterPluginBase):
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id']).all()
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
@ -833,8 +832,8 @@ class L3_NAT_db_mixin(l3.RouterPluginBase):
if not vals:
return nets
ext_nets = set([en['network_id'] for en in
context.session.query(ExternalNetwork).all()])
ext_nets = set(en['network_id']
for en in context.session.query(ExternalNetwork))
if vals[0]:
return [n for n in nets if n['id'] in ext_nets]
else:

View File

@ -196,7 +196,7 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase):
fields=None, sorts=None, limit=None, marker_obj=None,
page_reverse=False):
query = self._get_collection_query(context, model, filters)
return [dict_func(c, fields) for c in query.all()]
return [dict_func(c, fields) for c in query]
def _get_collection_count(self, context, model, filters=None):
return self._get_collection_query(context, model, filters).count()
@ -463,7 +463,7 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase):
with context.session.begin(subtransactions=True):
vip = self._get_resource(context, Vip, id)
qry = context.session.query(Pool)
for pool in qry.filter_by(vip_id=id).all():
for pool in qry.filter_by(vip_id=id):
pool.update({"vip_id": None})
context.session.delete(vip)
@ -576,7 +576,7 @@ class LoadBalancerPluginDb(LoadBalancerPluginBase):
collection = self._model_query(context, Pool)
collection = self._apply_filters_to_query(collection, Pool, filters)
return [self._make_pool_dict(c, fields)
for c in collection.all()]
for c in collection]
def stats(self, context, pool_id):
with context.session.begin(subtransactions=True):

View File

@ -58,7 +58,7 @@ class DbQuotaDriver(object):
# update with tenant specific limits
q_qry = context.session.query(Quota).filter_by(tenant_id=tenant_id)
tenant_quota.update((q['resource'], q['limit']) for q in q_qry.all())
tenant_quota.update((q['resource'], q['limit']) for q in q_qry)
return tenant_quota
@ -69,10 +69,9 @@ class DbQuotaDriver(object):
After deletion, this tenant will use default quota values in conf.
"""
with context.session.begin():
tenant_quotas = context.session.query(Quota).filter_by(
tenant_id=tenant_id).all()
for quota in tenant_quotas:
context.session.delete(quota)
tenant_quotas = context.session.query(Quota)
tenant_quotas = tenant_quotas.filter_by(tenant_id=tenant_id)
tenant_quotas.delete()
@staticmethod
def get_all_quotas(context, resources):
@ -91,7 +90,7 @@ class DbQuotaDriver(object):
all_tenant_quotas = {}
for quota in context.session.query(Quota).all():
for quota in context.session.query(Quota):
tenant_id = quota['tenant_id']
# avoid setdefault() because only want to copy when actually req'd

View File

@ -177,8 +177,7 @@ class SecurityGroupServerRpcCallbackMixin(object):
query = query.join(models_v2.IPAllocation,
ip_port == sg_binding_port)
query = query.filter(sg_binding_sgid.in_(remote_group_ids))
ip_in_db = query.all()
for security_group_id, ip_address in ip_in_db:
for security_group_id, ip_address in query:
ips_by_group[security_group_id].append(ip_address)
return ips_by_group
@ -208,7 +207,7 @@ class SecurityGroupServerRpcCallbackMixin(object):
for network_id in network_ids:
ips[network_id] = []
for port, ip in query.all():
for port, ip in query:
ips[port['network_id']].append(ip)
return ips

View File

@ -268,7 +268,7 @@ class ServiceTypeManager(object):
if column:
query = query.filter(column.in_(value))
return [self._make_svc_type_dict(context, svc_type, fields)
for svc_type in query.all()]
for svc_type in query]
def create_service_type(self, context, service_type):
"""Create a new service type."""

View File

@ -255,7 +255,7 @@ class PluginV2(db_base_plugin_v2.QuantumDbPluginV2):
# Check if ports are using this subnet
allocated_qry = context.session.query(models_v2.IPAllocation)
allocated_qry = allocated_qry.options(orm.joinedload('ports'))
allocated = allocated_qry.filter_by(subnet_id=id).all()
allocated = allocated_qry.filter_by(subnet_id=id)
prefix = db_base_plugin_v2.AGENT_OWNER_PREFIX
if not all(not a.port_id or a.ports.device_owner.startswith(prefix)

View File

@ -190,7 +190,7 @@ class HyperVPluginDB(object):
# get existing allocations for all physical networks
allocations = dict()
allocs_q = session.query(hyperv_model.VlanAllocation)
for alloc in allocs_q.all():
for alloc in allocs_q:
allocations.setdefault(alloc.physical_network,
set()).add(alloc)

View File

@ -217,7 +217,7 @@ class MetaPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
column = getattr(models_v2.Network, key, None)
if column:
collection = collection.filter(column.in_(value))
return [self._make_network_dict(c, fields) for c in collection.all()]
return [self._make_network_dict(c, fields) for c in collection]
def get_networks(self, context, filters=None, fields=None):
nets = self.get_networks_with_flavor(context, filters, None)
@ -333,7 +333,7 @@ class MetaPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
column = getattr(l3_db.Router, key, None)
if column:
collection = collection.filter(column.in_(value))
return [self._make_router_dict(c, fields) for c in collection.all()]
return [self._make_router_dict(c, fields) for c in collection]
def get_routers(self, context, filters=None, fields=None):
routers = self.get_routers_with_flavor(context, filters,

View File

@ -831,7 +831,7 @@ class MidonetPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=network_id).all()
network_id=network_id)
network_port = None
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:

View File

@ -1639,7 +1639,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id']).all()
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
port_id = p['id']

View File

@ -230,14 +230,14 @@ class NVPQoSDbMixin(ext_qos.QueuePluginBase):
filters = {'device_id': [port.get('device_id')],
'network_id': [network['network_id'] for
network in networks_with_same_queue]}
query = self._model_query(context, models_v2.Port)
ports = self._apply_filters_to_query(query, models_v2.Port,
filters).all()
if ports:
query = self._model_query(context, models_v2.Port.id)
query = self._apply_filters_to_query(query, models_v2.Port,
filters)
ports_ids = [p[0] for p in query]
if ports_ids:
# shared queue already exists find the queue id
filters = {'port_id': [p['id'] for p in ports]}
queues = self._get_port_queue_bindings(context, filters,
queues = self._get_port_queue_bindings(context,
{'port_id': ports_ids},
['queue_id'])
if queues:
return queues[0]['queue_id']

View File

@ -355,6 +355,8 @@ def set_port_status(port_id, status):
def get_tunnel_endpoints():
session = db.get_session()
try:
# TODO(rpodolyaka): Query.all() can't raise the NoResultFound exception
# Fix this later along with other identical cases.
tunnels = session.query(ovs_models_v2.TunnelEndpoint).all()
except exc.NoResultFound:
return []
@ -364,6 +366,8 @@ def get_tunnel_endpoints():
def _generate_tunnel_id(session):
try:
# TODO(rpodolyaka): Query.all() can't raise the NoResultFound exception
# Fix this later along with other identical cases.
tunnels = session.query(ovs_models_v2.TunnelEndpoint).all()
except exc.NoResultFound:
return 0

View File

@ -132,7 +132,7 @@ class RyuQuantumPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
for tun in self.tunnel_key.all_list():
self.tun_client.update_tunnel_key(tun.network_id, tun.tunnel_key)
session = db.get_session()
for port in session.query(models_v2.Port).all():
for port in session.query(models_v2.Port):
self.iface_client.update_network_id(port.id, port.network_id)
def _client_create_network(self, net_id, tunnel_key):

View File

@ -57,7 +57,7 @@ class LoadBalancerCallbacks(object):
up = True # makes pep8 and sqlalchemy happy
qry = qry.filter(loadbalancer_db.Vip.admin_state_up == up)
qry = qry.filter(loadbalancer_db.Pool.admin_state_up == up)
return [id for id, in qry.all()]
return [id for id, in qry]
def get_logical_device(self, context, pool_id=None, activate=True,
**kwargs):
@ -321,7 +321,7 @@ class LoadBalancerPlugin(loadbalancer_db.LoadBalancerPluginDb):
)
qry = qry.filter_by(monitor_id=hm['id'])
for assoc in qry.all():
for assoc in qry:
self.agent_rpc.modify_pool(context, assoc['pool_id'])
return hm
@ -332,7 +332,7 @@ class LoadBalancerPlugin(loadbalancer_db.LoadBalancerPluginDb):
)
qry = qry.filter_by(monitor_id=id)
pool_ids = [a['pool_id'] for a in qry.all()]
pool_ids = [a['pool_id'] for a in qry]
super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
for pid in pool_ids:
self.agent_rpc.modify_pool(context, pid)

View File

@ -337,8 +337,8 @@ class NetworkGatewayDbTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
session = db_api.get_session()
gw_query = session.query(nicira_networkgw_db.NetworkGateway)
dev_query = session.query(nicira_networkgw_db.NetworkGatewayDevice)
self.assertEqual(exp_gw_count, len(gw_query.all()))
self.assertEqual(0, len(dev_query.all()))
self.assertEqual(exp_gw_count, gw_query.count())
self.assertEqual(0, dev_query.count())
def test_delete_network_gateway(self):
self._test_delete_network_gateway()

View File

@ -1709,7 +1709,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
q = update_context.session.query(models_v2.IPAllocation)
q = q.filter_by(port_id=None, ip_address=ip_address)
self.assertEqual(len(q.all()), 1)
self.assertEqual(q.count(), 1)
def test_recycle_held_ip_address(self):
plugin = QuantumManager.get_plugin()