# ===== nova-2014.1.5/nova/rdp/__init__.py =====
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Module for RDP Proxying."""

from oslo.config import cfg

rdp_opts = [
    cfg.StrOpt('html5_proxy_base_url',
               default='http://127.0.0.1:6083/',
               help='Location of RDP html5 console proxy, in the form '
                    '"http://127.0.0.1:6083/"'),
    cfg.BoolOpt('enabled',
                default=False,
                help='Enable RDP related features'),
]

CONF = cfg.CONF
CONF.register_opts(rdp_opts, group='rdp')


# ===== nova-2014.1.5/nova/db/base.py =====
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Base class for classes that need modular database access."""

from oslo.config import cfg

from nova.openstack.common import importutils

db_driver_opt = cfg.StrOpt('db_driver',
                           default='nova.db',
                           help='The driver to use for database access')

CONF = cfg.CONF
CONF.register_opt(db_driver_opt)


class Base(object):
    """DB driver is injected in the init method."""

    def __init__(self, db_driver=None):
        super(Base, self).__init__()
        if not db_driver:
            db_driver = CONF.db_driver
        self.db = importutils.import_module(db_driver)  # pylint: disable=C0103
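
# ===== editorial example (not part of the tarball) =====
# A minimal usage sketch showing how another module would read the [rdp]
# options registered above. It assumes oslo.config is importable and that
# importing nova.rdp is enough to register the option group (the module
# does so at import time); the URL shown is just the default.
from oslo.config import cfg

import nova.rdp  # noqa: importing registers the [rdp] option group

CONF = cfg.CONF
if CONF.rdp.enabled:
    proxy_url = CONF.rdp.html5_proxy_base_url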
# ===== nova-2014.1.5/nova/db/sqlalchemy/types.py =====
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Custom SQLAlchemy types."""

from sqlalchemy.dialects import postgresql
from sqlalchemy import types

from nova import utils


class IPAddress(types.TypeDecorator):
    """An SQLAlchemy type representing an IP address."""
    impl = types.String

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.INET())
        else:
            return dialect.type_descriptor(types.String(39))

    def process_bind_param(self, value, dialect):
        """Process/format the value before inserting it into the db."""
        if dialect.name == 'postgresql':
            return value
        # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened
        # form, not validate it.
        elif utils.is_valid_ipv6(value):
            return utils.get_shortened_ipv6(value)
        return value


class CIDR(types.TypeDecorator):
    """An SQLAlchemy type representing a CIDR definition."""
    impl = types.String

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.INET())
        else:
            return dialect.type_descriptor(types.String(43))

    def process_bind_param(self, value, dialect):
        """Process/format the value before inserting it into the db."""
        # NOTE(sdague): normalize all the inserts
        if utils.is_valid_ipv6_cidr(value):
            return utils.get_shortened_ipv6_cidr(value)
        return value


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/migrate.cfg =====
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
repository_id=nova

# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
# If this is changed once a database is under version control, you'll need to
# change the table name in each database too.
version_table=migrate_version

# When committing a change script, Migrate will attempt to generate the
# sql for all supported databases; normally, if one of them fails - probably
# because you don't have that database installed - it is ignored and the
# commit continues, perhaps ending successfully.
# Databases in this list MUST compile successfully during a commit, or the
# entire commit will fail. List the databases your application will actually
# be using to ensure your updates to that database work properly.
# This must be a list; example: ['postgres','sqlite']
required_dbs=[]
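
# ===== editorial example (not part of the tarball) =====
# A standalone sketch of the custom types above in a table definition; the
# table name is illustrative. On PostgreSQL both columns become INET; on
# other backends they are plain strings, with IPv6 values shortened by
# process_bind_param at insert time.
from sqlalchemy import Column, Integer, MetaData, Table

from nova.db.sqlalchemy import types

meta = MetaData()
addresses_demo = Table('addresses_demo', meta,
                       Column('id', Integer, primary_key=True),
                       Column('address', types.IPAddress()),
                       Column('cidr', types.CIDR()))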
# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/228_add_metrics_in_compute_nodes.py =====
# Copyright 2013 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Add a new column metrics to save metrics info for compute nodes
    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)

    metrics = Column('metrics', Text, nullable=True)
    shadow_metrics = Column('metrics', Text, nullable=True)
    compute_nodes.create_column(metrics)
    shadow_compute_nodes.create_column(shadow_metrics)


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Remove the new column
    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)

    compute_nodes.drop_column('metrics')
    shadow_compute_nodes.drop_column('metrics')


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/230_add_details_column_to_instance_actions_events.py =====
# Copyright 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column, String, Text

from nova.db.sqlalchemy import api
from nova.db.sqlalchemy import utils


def upgrade(migrate_engine):
    actions_events = utils.get_table(migrate_engine,
                                     'instance_actions_events')
    host = Column('host', String(255))
    details = Column('details', Text)
    actions_events.create_column(host)
    actions_events.create_column(details)
    shadow_actions_events = utils.get_table(migrate_engine,
            api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    shadow_actions_events.create_column(host.copy())
    shadow_actions_events.create_column(details.copy())


def downgrade(migrate_engine):
    actions_events = utils.get_table(migrate_engine,
                                     'instance_actions_events')
    actions_events.drop_column('host')
    actions_events.drop_column('details')
    shadow_actions_events = utils.get_table(migrate_engine,
            api._SHADOW_TABLE_PREFIX + 'instance_actions_events')
    shadow_actions_events.drop_column('host')
    shadow_actions_events.drop_column('details')
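
# ===== editorial example (not part of the tarball) =====
# A hedged sketch of driving this repository with sqlalchemy-migrate, which
# is how versioned scripts such as 228 and 230 get applied. The database
# URL is illustrative; stamping version 215 mirrors Nova's INIT_VERSION,
# since this compacted repository starts at 216.
from migrate.versioning import api

db_url = 'sqlite:////tmp/nova_demo.sqlite'
repo = 'nova/db/sqlalchemy/migrate_repo'

api.version_control(db_url, repo, version=215)  # stamp the starting version
api.upgrade(db_url, repo)  # run each script's upgrade() in version order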
# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/218_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/219_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/223_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass
# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/226_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/225_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/229_add_extra_resources_in_compute_nodes.py =====
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Add a new column extra_resources to save extra resources info for
    # compute nodes
    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)

    extra_resources = Column('extra_resources', Text, nullable=True)
    shadow_extra_resources = Column('extra_resources', Text, nullable=True)
    compute_nodes.create_column(extra_resources)
    shadow_compute_nodes.create_column(shadow_extra_resources)


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Remove the new column
    compute_nodes = Table('compute_nodes', meta, autoload=True)
    shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)

    compute_nodes.drop_column('extra_resources')
    shadow_compute_nodes.drop_column('extra_resources')
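
# ===== editorial example (not part of the tarball) =====
# A hedged sketch of checking the outcome of column-adding migrations such
# as 228 and 229 with SQLAlchemy's runtime inspection API; the URL is
# illustrative and assumes the migrations above have already run against it.
from sqlalchemy import create_engine, inspect

engine = create_engine('sqlite:////tmp/nova_demo.sqlite')
columns = [col['name']
           for col in inspect(engine).get_columns('compute_nodes')]
assert 'metrics' in columns
assert 'extra_resources' in columns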
# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/217_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass
# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/233_add_stats_in_compute_nodes.py =====
# Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Text

from nova.openstack.common import timeutils


def upgrade(engine):
    meta = MetaData()
    meta.bind = engine

    # Drop the compute_node_stats table and add a 'stats' column to
    # compute_nodes directly. The data itself is transient and doesn't
    # need to be copied over.
    table_names = ('compute_node_stats', 'shadow_compute_node_stats')
    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)
        table.drop()

    # Add a new stats column to compute nodes
    table_names = ('compute_nodes', 'shadow_compute_nodes')
    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)
        stats = Column('stats', Text, default='{}')
        table.create_column(stats)


def downgrade(engine):
    meta = MetaData()
    meta.bind = engine

    table_names = ('compute_nodes', 'shadow_compute_nodes')
    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)
        table.drop_column('stats')

    if engine.name == 'mysql':
        fk_name = 'fk_compute_node_stats_compute_node_id'
    else:
        fk_name = 'compute_node_stats_compute_node_id_fkey'

    table = Table('compute_node_stats', meta,
        Column('created_at', DateTime, default=timeutils.utcnow),
        Column('updated_at', DateTime, onupdate=timeutils.utcnow),
        Column('deleted_at', DateTime),
        Column('deleted', Integer, default=0),
        Column('id', Integer, nullable=False),
        Column('key', String(255), nullable=False),
        Column('value', String(255), nullable=True),
        Column('compute_node_id', Integer,
               ForeignKey('compute_nodes.id', name=fk_name),
               index=True),
        Index('compute_node_stats_node_id_and_deleted_idx',
              'compute_node_id', 'deleted'),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    table.create()

    # shadow version has no fkey or index
    table = Table('shadow_compute_node_stats', meta,
        Column('created_at', DateTime, default=timeutils.utcnow),
        Column('updated_at', DateTime, onupdate=timeutils.utcnow),
        Column('deleted_at', DateTime),
        Column('deleted', Integer, default=0),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('key', String(255), nullable=False),
        Column('value', String(255), nullable=True),
        Column('compute_node_id', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    table.create()


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/__init__.py =====
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
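
# ===== editorial example (not part of the tarball) =====
# A self-contained sketch of the add-a-column pattern used by 233 above,
# run against an in-memory SQLite engine; table and column names are
# illustrative. Table.create_column() is not plain SQLAlchemy: it is added
# by sqlalchemy-migrate's changeset module, which monkey-patches the schema
# classes when imported (assumed available here, as the migrations above
# already depend on it).
import migrate.changeset  # noqa: enables Table.create_column and friends
from sqlalchemy import (Column, Integer, MetaData, Table, Text,
                        create_engine)

engine = create_engine('sqlite://')
meta = MetaData(bind=engine)
demo = Table('demo_nodes', meta, Column('id', Integer, primary_key=True))
demo.create()

# Reflect the live table and add the column, as upgrade() does above.
reflected = Table('demo_nodes', MetaData(bind=engine), autoload=True)
reflected.create_column(Column('stats', Text, default='{}'))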
# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/222_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/231_add_ephemeral_key_uuid.py =====
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData, Column, Table
from sqlalchemy import String


def upgrade(migrate_engine):
    """Function adds ephemeral storage encryption key uuid field."""
    meta = MetaData(bind=migrate_engine)

    instances = Table('instances', meta, autoload=True)
    shadow_instances = Table('shadow_instances', meta, autoload=True)

    ephemeral_key_uuid = Column('ephemeral_key_uuid', String(36))
    instances.create_column(ephemeral_key_uuid)
    shadow_instances.create_column(ephemeral_key_uuid.copy())

    migrate_engine.execute(instances.update().
                           values(ephemeral_key_uuid=None))
    migrate_engine.execute(shadow_instances.update().
                           values(ephemeral_key_uuid=None))


def downgrade(migrate_engine):
    """Function removes ephemeral storage encryption key uuid field."""
    meta = MetaData(bind=migrate_engine)

    instances = Table('instances', meta, autoload=True)
    shadow_instances = Table('shadow_instances', meta, autoload=True)

    instances.c.ephemeral_key_uuid.drop()
    shadow_instances.c.ephemeral_key_uuid.drop()


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/221_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass
# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/232_drop_dump_tables.py =====
# Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData
from sqlalchemy import Table


def upgrade(migrate_engine):
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)

    table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
                   'instance_actions_events', 'instance_faults', 'migrations']
    for table_name in table_names:
        table = Table('dump_' + table_name, meta)
        table.drop(checkfirst=True)


def downgrade(migrate_engine):
    pass


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/224_placeholder.py =====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work. New Icehouse work starts after
# all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass


# ===== nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/216_havana.py =====
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Enum, Float
from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType

from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging


LOG = logging.getLogger(__name__)

# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.


# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
# Schema. In Folsom we created tables as latin1 and converted them to utf8
# later. This conversion causes some of the Text columns on MySQL to get
# created as mediumtext instead of just text.
def MediumText():
    return Text().with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')


def Inet():
    return String(length=43).with_variant(dialects.postgresql.INET(),
                                          'postgresql')


def InetSmall():
    return String(length=39).with_variant(dialects.postgresql.INET(),
                                          'postgresql')


def _create_shadow_tables(migrate_engine):
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)
    table_names = meta.tables.keys()

    meta.bind = migrate_engine

    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)

        columns = []
        for column in table.columns:
            column_copy = None
            # NOTE(boris-42): BigInteger is not supported by sqlite, so
            #                 after copy it will have NullType, other
            #                 types that are used in Nova are supported by
            #                 sqlite.
            if isinstance(column.type, NullType):
                column_copy = Column(column.name, BigInteger(), default=0)
            if table_name == 'instances' and column.name == 'locked_by':
                enum = Enum('owner', 'admin',
                            name='shadow_instances0locked_by')
                column_copy = Column(column.name, enum)
            else:
                column_copy = column.copy()
            columns.append(column_copy)

        shadow_table_name = 'shadow_' + table_name
        shadow_table = Table(shadow_table_name, meta, *columns,
                             mysql_engine='InnoDB')
        try:
            shadow_table.create()
        except Exception:
            LOG.info(repr(shadow_table))
            LOG.exception(_('Exception while creating table.'))
            raise


def _populate_instance_types(instance_types_table):
    default_inst_types = {
        'm1.tiny': dict(mem=512, vcpus=1, root_gb=1, eph_gb=0, flavid=1),
        'm1.small': dict(mem=2048, vcpus=1, root_gb=20, eph_gb=0, flavid=2),
        'm1.medium': dict(mem=4096, vcpus=2, root_gb=40, eph_gb=0, flavid=3),
        'm1.large': dict(mem=8192, vcpus=4, root_gb=80, eph_gb=0, flavid=4),
        'm1.xlarge': dict(mem=16384, vcpus=8, root_gb=160, eph_gb=0, flavid=5)
    }

    try:
        i = instance_types_table.insert()
        for name, values in default_inst_types.iteritems():
            i.execute({'name': name, 'memory_mb': values["mem"],
                       'vcpus': values["vcpus"], 'deleted': 0,
                       'root_gb': values["root_gb"],
                       'ephemeral_gb': values["eph_gb"],
                       'rxtx_factor': 1,
                       'swap': 0,
                       'flavorid': values["flavid"],
                       'disabled': False,
                       'is_public': True})
    except Exception:
        LOG.info(repr(instance_types_table))
        LOG.exception(_('Exception while seeding instance_types table'))
        raise
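
# ===== editorial example (not part of the tarball) =====
# A standalone sketch showing what the MediumText/Inet helpers above
# produce: with_variant keeps one logical type while emitting
# dialect-specific DDL. Compiling the type against different dialects
# makes the substitution visible.
from sqlalchemy import String, Text
from sqlalchemy.dialects import mysql, postgresql, sqlite

medium_text = Text().with_variant(mysql.MEDIUMTEXT(), 'mysql')
inet = String(length=43).with_variant(postgresql.INET(), 'postgresql')

print(medium_text.compile(dialect=mysql.dialect()))    # MEDIUMTEXT
print(medium_text.compile(dialect=sqlite.dialect()))   # TEXT
print(inet.compile(dialect=postgresql.dialect()))      # INET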
# NOTE(dprince): we add these here so our schema contains dump tables
# which were added in migration 209 (in Havana). We can drop these in
# Icehouse: https://bugs.launchpad.net/nova/+bug/1266538
def _create_dump_tables(migrate_engine):
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)

    table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
                   'instance_actions_events', 'instance_faults', 'migrations']
    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)

        dump_table_name = 'dump_' + table.name
        columns = []
        for column in table.columns:
            # NOTE(dprince): The dump_ tables were originally created from an
            # earlier schema version so we don't want to add the pci_stats
            # column so that schema diffs are exactly the same.
            if column.name == 'pci_stats':
                continue
            else:
                columns.append(column.copy())
        table_dump = Table(dump_table_name, meta, *columns,
                           mysql_engine='InnoDB')
        table_dump.create()


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    agent_builds = Table('agent_builds', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('hypervisor', String(length=255)),
        Column('os', String(length=255)),
        Column('architecture', String(length=255)),
        Column('version', String(length=255)),
        Column('url', String(length=255)),
        Column('md5hash', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    aggregate_hosts = Table('aggregate_hosts', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('host', String(length=255)),
        Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
               nullable=False),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    aggregate_metadata = Table('aggregate_metadata', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
               nullable=False),
        Column('key', String(length=255), nullable=False),
        Column('value', String(length=255), nullable=False),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    aggregates = Table('aggregates', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('name', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    block_device_mapping = Table('block_device_mapping', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('device_name', String(length=255), nullable=True),
        Column('delete_on_termination', Boolean),
        Column('snapshot_id', String(length=36), nullable=True),
        Column('volume_id', String(length=36), nullable=True),
        Column('volume_size', Integer),
        Column('no_device', Boolean),
        Column('connection_info', MediumText()),
        Column('instance_uuid', String(length=36)),
        Column('deleted', Integer),
        Column('source_type', String(length=255), nullable=True),
        Column('destination_type', String(length=255), nullable=True),
        Column('guest_format', String(length=255), nullable=True),
        Column('device_type', String(length=255), nullable=True),
        Column('disk_bus', String(length=255), nullable=True),
        Column('boot_index', Integer),
        Column('image_id', String(length=36), nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    bw_usage_cache = Table('bw_usage_cache', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('start_period', DateTime, nullable=False),
        Column('last_refreshed', DateTime),
        Column('bw_in', BigInteger),
        Column('bw_out', BigInteger),
        Column('mac', String(length=255)),
        Column('uuid', String(length=36)),
        Column('last_ctr_in', BigInteger()),
        Column('last_ctr_out', BigInteger()),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    cells = Table('cells', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('api_url', String(length=255)),
        Column('weight_offset', Float),
        Column('weight_scale', Float),
        Column('name', String(length=255)),
        Column('is_parent', Boolean),
        Column('deleted', Integer),
        Column('transport_url', String(length=255), nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    certificates = Table('certificates', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('file_name', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    compute_node_stats = Table('compute_node_stats', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('compute_node_id', Integer, nullable=False),
        Column('key', String(length=255), nullable=False),
        Column('value', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    compute_nodes = Table('compute_nodes', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('service_id', Integer, nullable=False),
        Column('vcpus', Integer, nullable=False),
        Column('memory_mb', Integer, nullable=False),
        Column('local_gb', Integer, nullable=False),
        Column('vcpus_used', Integer, nullable=False),
        Column('memory_mb_used', Integer, nullable=False),
        Column('local_gb_used', Integer, nullable=False),
        Column('hypervisor_type', MediumText(), nullable=False),
        Column('hypervisor_version', Integer, nullable=False),
        Column('cpu_info', MediumText(), nullable=False),
        Column('disk_available_least', Integer),
        Column('free_ram_mb', Integer),
        Column('free_disk_gb', Integer),
        Column('current_workload', Integer),
        Column('running_vms', Integer),
        Column('hypervisor_hostname', String(length=255)),
        Column('deleted', Integer),
        Column('host_ip', InetSmall()),
        Column('supported_instances', Text),
        Column('pci_stats', Text, nullable=True),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    console_pools = Table('console_pools', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('address', InetSmall()),
        Column('username', String(length=255)),
        Column('password', String(length=255)),
        Column('console_type', String(length=255)),
        Column('public_hostname', String(length=255)),
        Column('host', String(length=255)),
        Column('compute_host', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    consoles = Table('consoles', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('instance_name', String(length=255)),
        Column('password', String(length=255)),
        Column('port', Integer),
        Column('pool_id', Integer, ForeignKey('console_pools.id')),
        Column('instance_uuid', String(length=36),
               ForeignKey('instances.uuid',
                          name='consoles_instance_uuid_fkey')),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    dns_domains = Table('dns_domains', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Boolean),
        Column('domain', String(length=255), primary_key=True,
               nullable=False),
        Column('scope', String(length=255)),
        Column('availability_zone', String(length=255)),
        Column('project_id', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    fixed_ips = Table('fixed_ips', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('address', InetSmall()),
        Column('network_id', Integer),
        Column('allocated', Boolean),
        Column('leased', Boolean),
        Column('reserved', Boolean),
        Column('virtual_interface_id', Integer),
        Column('host', String(length=255)),
        Column('instance_uuid', String(length=36)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    floating_ips = Table('floating_ips', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('address', InetSmall()),
        Column('fixed_ip_id', Integer),
        Column('project_id', String(length=255)),
        Column('host', String(length=255)),
        Column('auto_assigned', Boolean),
        Column('pool', String(length=255)),
        Column('interface', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    instance_faults = Table('instance_faults', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('instance_uuid', String(length=36)),
        Column('code', Integer, nullable=False),
        Column('message', String(length=255)),
        Column('details', MediumText()),
        Column('host', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    instance_id_mappings = Table('instance_id_mappings', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), nullable=False),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    instance_info_caches = Table('instance_info_caches', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('network_info', MediumText()),
        Column('instance_uuid', String(length=36), nullable=False),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    groups = Table('instance_groups', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('uuid', String(length=36), nullable=False),
        Column('name', String(length=255)),
        UniqueConstraint('uuid', 'deleted',
                         name='uniq_instance_groups0uuid0deleted'),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    group_metadata = Table('instance_group_metadata', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
        Column('group_id', Integer, ForeignKey('instance_groups.id'),
               nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )
    group_policy = Table('instance_group_policy', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('policy', String(length=255)),
        Column('group_id', Integer, ForeignKey('instance_groups.id'),
               nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    group_member = Table('instance_group_member', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('instance_id', String(length=255)),
        Column('group_id', Integer, ForeignKey('instance_groups.id'),
               nullable=False),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    instance_metadata = Table('instance_metadata', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
        Column('instance_uuid', String(length=36), nullable=True),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    instance_system_metadata = Table('instance_system_metadata', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('instance_uuid', String(length=36), nullable=False),
        Column('key', String(length=255), nullable=False),
        Column('value', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    instance_type_extra_specs = Table('instance_type_extra_specs', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('instance_type_id', Integer, ForeignKey('instance_types.id'),
               nullable=False),
        Column('key', String(length=255)),
        Column('value', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    instance_type_projects = Table('instance_type_projects', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('instance_type_id', Integer, nullable=False),
        Column('project_id', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    instance_types = Table('instance_types', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('name', String(length=255)),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('memory_mb', Integer, nullable=False),
        Column('vcpus', Integer, nullable=False),
        Column('swap', Integer, nullable=False),
        Column('vcpu_weight', Integer),
        Column('flavorid', String(length=255)),
        Column('rxtx_factor', Float),
        Column('root_gb', Integer),
        Column('ephemeral_gb', Integer),
        Column('disabled', Boolean),
        Column('is_public', Boolean),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    inst_lock_enum = Enum('owner', 'admin', name='instances0locked_by')
    instances = Table('instances', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('internal_id', Integer),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('image_ref', String(length=255)),
        Column('kernel_id', String(length=255)),
        Column('ramdisk_id', String(length=255)),
        Column('launch_index', Integer),
        Column('key_name', String(length=255)),
        Column('key_data', MediumText()),
        Column('power_state', Integer),
        Column('vm_state', String(length=255)),
        Column('memory_mb', Integer),
        Column('vcpus', Integer),
        Column('hostname', String(length=255)),
        Column('host', String(length=255)),
        Column('user_data', MediumText()),
        Column('reservation_id', String(length=255)),
        Column('scheduled_at', DateTime),
        Column('launched_at', DateTime),
        Column('terminated_at', DateTime),
        Column('display_name', String(length=255)),
        Column('display_description', String(length=255)),
        Column('availability_zone', String(length=255)),
        Column('locked', Boolean),
        Column('os_type', String(length=255)),
        Column('launched_on', MediumText()),
        Column('instance_type_id', Integer),
        Column('vm_mode', String(length=255)),
        Column('uuid', String(length=36)),
        Column('architecture', String(length=255)),
        Column('root_device_name', String(length=255)),
        Column('access_ip_v4', InetSmall()),
        Column('access_ip_v6', InetSmall()),
        Column('config_drive', String(length=255)),
        Column('task_state', String(length=255)),
        Column('default_ephemeral_device', String(length=255)),
        Column('default_swap_device', String(length=255)),
        Column('progress', Integer),
        Column('auto_disk_config', Boolean),
        Column('shutdown_terminate', Boolean),
        Column('disable_terminate', Boolean),
        Column('root_gb', Integer),
        Column('ephemeral_gb', Integer),
        Column('cell_name', String(length=255)),
        Column('node', String(length=255)),
        Column('deleted', Integer),
        Column('locked_by', inst_lock_enum),
        Column('cleaned', Integer, default=0),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    instance_actions = Table('instance_actions', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('action', String(length=255)),
        Column('instance_uuid', String(length=36)),
        Column('request_id', String(length=255)),
        Column('user_id', String(length=255)),
        Column('project_id', String(length=255)),
        Column('start_time', DateTime),
        Column('finish_time', DateTime),
        Column('message', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    instance_actions_events = Table('instance_actions_events', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('event', String(length=255)),
        Column('action_id', Integer, ForeignKey('instance_actions.id')),
        Column('start_time', DateTime),
        Column('finish_time', DateTime),
        Column('result', String(length=255)),
        Column('traceback', Text),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    iscsi_targets = Table('iscsi_targets', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('target_num', Integer),
        Column('host', String(length=255)),
        Column('volume_id', String(length=36), nullable=True),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    key_pairs = Table('key_pairs', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('name', String(length=255)),
        Column('user_id', String(length=255)),
        Column('fingerprint', String(length=255)),
        Column('public_key', MediumText()),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    migrations = Table('migrations', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('source_compute', String(length=255)),
        Column('dest_compute', String(length=255)),
        Column('dest_host', String(length=255)),
        Column('status', String(length=255)),
        Column('instance_uuid', String(length=36)),
        Column('old_instance_type_id', Integer),
        Column('new_instance_type_id', Integer),
        Column('source_node', String(length=255)),
        Column('dest_node', String(length=255)),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    networks = Table('networks', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('injected', Boolean),
        Column('cidr', Inet()),
        Column('netmask', InetSmall()),
        Column('bridge', String(length=255)),
        Column('gateway', InetSmall()),
        Column('broadcast', InetSmall()),
        Column('dns1', InetSmall()),
        Column('vlan', Integer),
        Column('vpn_public_address', InetSmall()),
        Column('vpn_public_port', Integer),
        Column('vpn_private_address', InetSmall()),
        Column('dhcp_start', InetSmall()),
        Column('project_id', String(length=255)),
        Column('host', String(length=255)),
        Column('cidr_v6', Inet()),
        Column('gateway_v6', InetSmall()),
        Column('label', String(length=255)),
        Column('netmask_v6', InetSmall()),
        Column('bridge_interface', String(length=255)),
        Column('multi_host', Boolean),
        Column('dns2', InetSmall()),
        Column('uuid', String(length=36)),
        Column('priority', Integer),
        Column('rxtx_base', Integer),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    pci_devices_uc_name = 'uniq_pci_devices0compute_node_id0address0deleted'
    pci_devices = Table('pci_devices', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Integer, default=0, nullable=False),
        Column('id', Integer, primary_key=True),
        Column('compute_node_id', Integer, nullable=False),
        Column('address', String(12), nullable=False),
        Column('product_id', String(4)),
        Column('vendor_id', String(4)),
        Column('dev_type', String(8)),
        Column('dev_id', String(255)),
        Column('label', String(255), nullable=False),
        Column('status', String(36), nullable=False),
        Column('extra_info', Text, nullable=True),
        Column('instance_uuid', String(36), nullable=True),
        Index('ix_pci_devices_compute_node_id_deleted',
              'compute_node_id', 'deleted'),
        Index('ix_pci_devices_instance_uuid_deleted',
              'instance_uuid', 'deleted'),
        UniqueConstraint('compute_node_id', 'address', 'deleted',
                         name=pci_devices_uc_name),
        mysql_engine='InnoDB',
        mysql_charset='utf8')

    provider_fw_rules = Table('provider_fw_rules', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('protocol', String(length=5)),
        Column('from_port', Integer),
        Column('to_port', Integer),
        Column('cidr', Inet()),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
    quota_classes = Table('quota_classes', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('class_name', String(length=255)),
        Column('resource', String(length=255)),
        Column('hard_limit', Integer),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    quota_usages = Table('quota_usages', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('project_id', String(length=255)),
        Column('resource', String(length=255)),
        Column('in_use', Integer, nullable=False),
        Column('reserved', Integer, nullable=False),
        Column('until_refresh', Integer),
        Column('deleted', Integer),
        Column('user_id', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    quotas = Table('quotas', meta,
        Column('id', Integer, primary_key=True, nullable=False),
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('project_id', String(length=255)),
        Column('resource', String(length=255), nullable=False),
        Column('hard_limit', Integer),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
    project_user_quotas = Table('project_user_quotas', meta,
        Column('id', Integer, primary_key=True, nullable=False),
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('deleted', Integer),
        Column('user_id', String(length=255), nullable=False),
        Column('project_id', String(length=255), nullable=False),
        Column('resource', String(length=255), nullable=False),
        Column('hard_limit', Integer, nullable=True),
        UniqueConstraint('user_id', 'project_id', 'resource', 'deleted',
                         name=uniq_name),
        mysql_engine='InnoDB',
        mysql_charset='utf8',
    )

    reservations = Table('reservations', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(length=36), nullable=False),
        Column('usage_id', Integer, nullable=False),
        Column('project_id', String(length=255)),
        Column('resource', String(length=255)),
        Column('delta', Integer, nullable=False),
        Column('expire', DateTime),
        Column('deleted', Integer),
        Column('user_id', String(length=255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    s3_images = Table('s3_images', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(length=36), nullable=False),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    security_group_instance_association = \
        Table('security_group_instance_association', meta,
              Column('created_at', DateTime),
              Column('updated_at', DateTime),
              Column('deleted_at', DateTime),
              Column('id', Integer, primary_key=True, nullable=False),
              Column('security_group_id', Integer),
              Column('instance_uuid', String(length=36)),
              Column('deleted', Integer),
              mysql_engine='InnoDB',
              mysql_charset='utf8'
    )

    security_group_rules = Table('security_group_rules', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('deleted_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('parent_group_id', Integer, ForeignKey('security_groups.id')),
        Column('protocol', String(length=255)),
        Column('from_port', Integer),
        Column('to_port', Integer),
        Column('cidr', Inet()),
        Column('group_id', Integer, ForeignKey('security_groups.id')),
        Column('deleted', Integer),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )
DateTime), Column('deleted_at', DateTime), Column('id', Integer, primary_key=True, nullable=False), Column('name', String(length=255)), Column('description', String(length=255)), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('deleted', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) security_group_default_rules = Table('security_group_default_rules', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Integer, default=0), Column('id', Integer, primary_key=True, nullable=False), Column('protocol', String(length=5)), Column('from_port', Integer), Column('to_port', Integer), Column('cidr', Inet()), mysql_engine='InnoDB', mysql_charset='utf8', ) services = Table('services', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('id', Integer, primary_key=True, nullable=False), Column('host', String(length=255)), Column('binary', String(length=255)), Column('topic', String(length=255)), Column('report_count', Integer, nullable=False), Column('disabled', Boolean), Column('deleted', Integer), Column('disabled_reason', String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) snapshot_id_mappings = Table('snapshot_id_mappings', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('id', Integer, primary_key=True, nullable=False), Column('uuid', String(length=36), nullable=False), Column('deleted', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) snapshots = Table('snapshots', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('id', String(length=36), primary_key=True, nullable=False), Column('volume_id', String(length=36), nullable=False), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('status', String(length=255)), Column('progress', String(length=255)), Column('volume_size', Integer), Column('scheduled_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('deleted', String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) task_log = Table('task_log', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('id', Integer, primary_key=True, nullable=False), Column('task_name', String(length=255), nullable=False), Column('state', String(length=255), nullable=False), Column('host', String(length=255), nullable=False), Column('period_beginning', DateTime, nullable=False), Column('period_ending', DateTime, nullable=False), Column('message', String(length=255), nullable=False), Column('task_items', Integer), Column('errors', Integer), Column('deleted', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) virtual_interfaces = Table('virtual_interfaces', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('id', Integer, primary_key=True, nullable=False), Column('address', String(length=255)), Column('network_id', Integer), Column('uuid', String(length=36)), Column('instance_uuid', String(length=36), nullable=True), Column('deleted', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) volume_id_mappings = Table('volume_id_mappings', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('id', Integer, primary_key=True, 
nullable=False), Column('uuid', String(length=36), nullable=False), Column('deleted', Integer), mysql_engine='InnoDB', mysql_charset='utf8' ) volumes = Table('volumes', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('id', String(length=36), primary_key=True, nullable=False), Column('ec2_id', String(length=255)), Column('user_id', String(length=255)), Column('project_id', String(length=255)), Column('host', String(length=255)), Column('size', Integer), Column('availability_zone', String(length=255)), Column('mountpoint', String(length=255)), Column('status', String(length=255)), Column('attach_status', String(length=255)), Column('scheduled_at', DateTime), Column('launched_at', DateTime), Column('terminated_at', DateTime), Column('display_name', String(length=255)), Column('display_description', String(length=255)), Column('provider_location', String(length=256)), Column('provider_auth', String(length=256)), Column('snapshot_id', String(length=36)), Column('volume_type_id', Integer), Column('instance_uuid', String(length=36)), Column('attach_time', DateTime), Column('deleted', String(length=36)), mysql_engine='InnoDB', mysql_charset='utf8' ) volume_usage_cache = Table('volume_usage_cache', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), Column('id', Integer(), primary_key=True, nullable=False), Column('volume_id', String(36), nullable=False), Column('tot_last_refreshed', DateTime(timezone=False)), Column('tot_reads', BigInteger(), default=0), Column('tot_read_bytes', BigInteger(), default=0), Column('tot_writes', BigInteger(), default=0), Column('tot_write_bytes', BigInteger(), default=0), Column('curr_last_refreshed', DateTime(timezone=False)), Column('curr_reads', BigInteger(), default=0), Column('curr_read_bytes', BigInteger(), default=0), Column('curr_writes', BigInteger(), default=0), Column('curr_write_bytes', BigInteger(), default=0), Column('deleted', Integer), Column("instance_uuid", String(length=36)), Column("project_id", String(length=36)), Column("user_id", String(length=36)), Column("availability_zone", String(length=255)), mysql_engine='InnoDB', mysql_charset='utf8' ) instances.create() Index('project_id', instances.c.project_id).create() Index('uuid', instances.c.uuid, unique=True).create() # create all tables tables = [aggregates, console_pools, instance_types, security_groups, snapshots, volumes, # those that are children and others later agent_builds, aggregate_hosts, aggregate_metadata, block_device_mapping, bw_usage_cache, cells, certificates, compute_node_stats, compute_nodes, consoles, dns_domains, fixed_ips, floating_ips, instance_faults, instance_id_mappings, instance_info_caches, instance_metadata, instance_system_metadata, instance_type_extra_specs, instance_type_projects, instance_actions, instance_actions_events, groups, group_metadata, group_policy, group_member, iscsi_targets, key_pairs, migrations, networks, pci_devices, provider_fw_rules, quota_classes, quota_usages, quotas, project_user_quotas, reservations, s3_images, security_group_instance_association, security_group_rules, security_group_default_rules, services, snapshot_id_mappings, task_log, virtual_interfaces, volume_id_mappings, volume_usage_cache] for table in tables: try: table.create() except Exception: LOG.info(repr(table)) LOG.exception(_('Exception while creating table.')) raise # task log unique constraint task_log_uc = 
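
    # NOTE: illustrative aside, not part of the original migration. The list
    # above deliberately creates FK-target ("parent") tables such as
    # instances, security_groups and volumes before the tables that reference
    # them, since a ForeignKey can only be created against a table that
    # already exists. The same pattern, sketched with a hypothetical pair of
    # tables:
    #
    #     parent = Table('parent', meta,
    #                    Column('id', Integer, primary_key=True))
    #     child = Table('child', meta,
    #                   Column('id', Integer, primary_key=True),
    #                   Column('parent_id', Integer, ForeignKey('parent.id')))
    #     parent.create()   # must exist before child.create() succeeds
    #     child.create()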
"uniq_task_log0task_name0host0period_beginning0period_ending" task_log_cols = ('task_name', 'host', 'period_beginning', 'period_ending') uc = UniqueConstraint(*task_log_cols, table=task_log, name=task_log_uc) uc.create() # networks unique constraint UniqueConstraint('vlan', 'deleted', table=networks, name='uniq_networks0vlan0deleted').create() # instance_type_name constraint UniqueConstraint('name', 'deleted', table=instance_types, name='uniq_instance_types0name0deleted').create() # flavorid unique constraint UniqueConstraint('flavorid', 'deleted', table=instance_types, name='uniq_instance_types0flavorid0deleted').create() # keypair contraint UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs, name='uniq_key_pairs0user_id0name0deleted').create() # instance_type_projects constraint inst_type_uc_name = 'uniq_instance_type_projects0instance_type_id0' + \ 'project_id0deleted' UniqueConstraint('instance_type_id', 'project_id', 'deleted', table=instance_type_projects, name=inst_type_uc_name).create() # floating_ips unique constraint UniqueConstraint('address', 'deleted', table=floating_ips, name='uniq_floating_ips0address0deleted').create() # instance_info_caches UniqueConstraint('instance_uuid', table=instance_info_caches, name='uniq_instance_info_caches0instance_uuid').create() UniqueConstraint('address', 'deleted', table=virtual_interfaces, name='uniq_virtual_interfaces0address0deleted').create() # cells UniqueConstraint('name', 'deleted', table=cells, name='uniq_cells0name0deleted').create() # security_groups uc = UniqueConstraint('project_id', 'name', 'deleted', table=security_groups, name='uniq_security_groups0project_id0name0deleted') uc.create() # quotas UniqueConstraint('project_id', 'resource', 'deleted', table=quotas, name='uniq_quotas0project_id0resource0deleted').create() # fixed_ips UniqueConstraint('address', 'deleted', table=fixed_ips, name='uniq_fixed_ips0address0deleted').create() # services UniqueConstraint('host', 'topic', 'deleted', table=services, name='uniq_services0host0topic0deleted').create() UniqueConstraint('host', 'binary', 'deleted', table=services, name='uniq_services0host0binary0deleted').create() # agent_builds uc_name = 'uniq_agent_builds0hypervisor0os0architecture0deleted' UniqueConstraint('hypervisor', 'os', 'architecture', 'deleted', table=agent_builds, name=uc_name).create() uc_name = 'uniq_console_pools0host0console_type0compute_host0deleted' UniqueConstraint('host', 'console_type', 'compute_host', 'deleted', table=console_pools, name=uc_name).create() uc_name = 'uniq_aggregate_hosts0host0aggregate_id0deleted' UniqueConstraint('host', 'aggregate_id', 'deleted', table=aggregate_hosts, name=uc_name).create() uc_name = 'uniq_aggregate_metadata0aggregate_id0key0deleted' UniqueConstraint('aggregate_id', 'key', 'deleted', table=aggregate_metadata, name=uc_name).create() uc_name = 'uniq_instance_type_extra_specs0instance_type_id0key0deleted' UniqueConstraint('instance_type_id', 'key', 'deleted', table=instance_type_extra_specs, name=uc_name).create() # created first (to preserve ordering for schema diffs) mysql_pre_indexes = [ Index('instance_type_id', instance_type_projects.c.instance_type_id), Index('project_id', dns_domains.c.project_id), Index('fixed_ip_id', floating_ips.c.fixed_ip_id), Index('network_id', virtual_interfaces.c.network_id), Index('network_id', fixed_ips.c.network_id), Index('fixed_ips_virtual_interface_id_fkey', fixed_ips.c.virtual_interface_id), Index('address', fixed_ips.c.address), Index('fixed_ips_instance_uuid_fkey', 
    # created first (to preserve ordering for schema diffs)
    mysql_pre_indexes = [
        Index('instance_type_id', instance_type_projects.c.instance_type_id),
        Index('project_id', dns_domains.c.project_id),
        Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
        Index('network_id', virtual_interfaces.c.network_id),
        Index('network_id', fixed_ips.c.network_id),
        Index('fixed_ips_virtual_interface_id_fkey',
              fixed_ips.c.virtual_interface_id),
        Index('address', fixed_ips.c.address),
        Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
        Index('instance_uuid', instance_system_metadata.c.instance_uuid),
        Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
        Index('snapshot_id', block_device_mapping.c.snapshot_id),
        Index('usage_id', reservations.c.usage_id),
        Index('virtual_interfaces_instance_uuid_fkey',
              virtual_interfaces.c.instance_uuid),
        Index('volume_id', block_device_mapping.c.volume_id),
        Index('security_group_id',
              security_group_instance_association.c.security_group_id),
    ]

    # Common indexes (indexes we apply to all databases)
    # NOTE: order specific for MySQL diff support
    common_indexes = [
        # aggregate_metadata
        Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
        # agent_builds
        Index('agent_builds_hypervisor_os_arch_idx',
              agent_builds.c.hypervisor,
              agent_builds.c.os,
              agent_builds.c.architecture),
        # block_device_mapping
        Index('block_device_mapping_instance_uuid_idx',
              block_device_mapping.c.instance_uuid),
        Index('block_device_mapping_instance_uuid_device_name_idx',
              block_device_mapping.c.instance_uuid,
              block_device_mapping.c.device_name),
        # NOTE(dprince): This is now a duplicate index on MySQL and needs to
        # be removed there. We leave it here so the Index ordering
        # matches on schema diffs (for MySQL).
        # See Havana migration 186_new_bdm_format where we dropped the
        # virtual_name column.
        # IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
        Index(
            'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
            block_device_mapping.c.instance_uuid,
            block_device_mapping.c.device_name),
        Index('block_device_mapping_instance_uuid_volume_id_idx',
              block_device_mapping.c.instance_uuid,
              block_device_mapping.c.volume_id),
        # bw_usage_cache
        Index('bw_usage_cache_uuid_start_period_idx',
              bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
        Index('certificates_project_id_deleted_idx',
              certificates.c.project_id, certificates.c.deleted),
        Index('certificates_user_id_deleted_idx',
              certificates.c.user_id, certificates.c.deleted),
        # compute_node_stats
        Index('ix_compute_node_stats_compute_node_id',
              compute_node_stats.c.compute_node_id),
        Index('compute_node_stats_node_id_and_deleted_idx',
              compute_node_stats.c.compute_node_id,
              compute_node_stats.c.deleted),
        # consoles
        Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
        # dns_domains
        Index('dns_domains_domain_deleted_idx',
              dns_domains.c.domain, dns_domains.c.deleted),
        # fixed_ips
        Index('fixed_ips_host_idx', fixed_ips.c.host),
        Index('fixed_ips_network_id_host_deleted_idx',
              fixed_ips.c.network_id, fixed_ips.c.host,
              fixed_ips.c.deleted),
        Index('fixed_ips_address_reserved_network_id_deleted_idx',
              fixed_ips.c.address, fixed_ips.c.reserved,
              fixed_ips.c.network_id, fixed_ips.c.deleted),
        Index('fixed_ips_deleted_allocated_idx',
              fixed_ips.c.address, fixed_ips.c.deleted,
              fixed_ips.c.allocated),
        # floating_ips
        Index('floating_ips_host_idx', floating_ips.c.host),
        Index('floating_ips_project_id_idx', floating_ips.c.project_id),
        Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
              floating_ips.c.pool, floating_ips.c.deleted,
              floating_ips.c.fixed_ip_id, floating_ips.c.project_id),
        # group_member
        Index('instance_group_member_instance_idx',
              group_member.c.instance_id),
        # group_metadata
        Index('instance_group_metadata_key_idx', group_metadata.c.key),
        # group_policy
        Index('instance_group_policy_policy_idx', group_policy.c.policy),
        # instances
        Index('instances_reservation_id_idx', instances.c.reservation_id),
        Index('instances_terminated_at_launched_at_idx',
              instances.c.terminated_at, instances.c.launched_at),
        Index('instances_task_state_updated_at_idx',
              instances.c.task_state, instances.c.updated_at),
        Index('instances_host_deleted_idx',
              instances.c.host, instances.c.deleted),
        Index('instances_uuid_deleted_idx',
              instances.c.uuid, instances.c.deleted),
        Index('instances_host_node_deleted_idx',
              instances.c.host, instances.c.node, instances.c.deleted),
        Index('instances_host_deleted_cleaned_idx',
              instances.c.host, instances.c.deleted, instances.c.cleaned),
        # instance_actions
        Index('instance_uuid_idx', instance_actions.c.instance_uuid),
        Index('request_id_idx', instance_actions.c.request_id),
        # instance_faults
        Index('instance_faults_host_idx', instance_faults.c.host),
        Index('instance_faults_instance_uuid_deleted_created_at_idx',
              instance_faults.c.instance_uuid, instance_faults.c.deleted,
              instance_faults.c.created_at),
        # instance_id_mappings
        Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
        # instance_metadata
        Index('instance_metadata_instance_uuid_idx',
              instance_metadata.c.instance_uuid),
        # instance_type_extra_specs
        Index('instance_type_extra_specs_instance_type_id_key_idx',
              instance_type_extra_specs.c.instance_type_id,
              instance_type_extra_specs.c.key),
        # iscsi_targets
        Index('iscsi_targets_host_idx', iscsi_targets.c.host),
        Index('iscsi_targets_host_volume_id_deleted_idx',
              iscsi_targets.c.host, iscsi_targets.c.volume_id,
              iscsi_targets.c.deleted),
        # migrations
        Index('migrations_by_host_nodes_and_status_idx',
              migrations.c.deleted, migrations.c.source_compute,
              migrations.c.dest_compute, migrations.c.source_node,
              migrations.c.dest_node, migrations.c.status),
        Index('migrations_instance_uuid_and_status_idx',
              migrations.c.deleted, migrations.c.instance_uuid,
              migrations.c.status),
        # networks
        Index('networks_host_idx', networks.c.host),
        Index('networks_cidr_v6_idx', networks.c.cidr_v6),
        Index('networks_bridge_deleted_idx',
              networks.c.bridge, networks.c.deleted),
        Index('networks_project_id_deleted_idx',
              networks.c.project_id, networks.c.deleted),
        Index('networks_uuid_project_id_deleted_idx',
              networks.c.uuid, networks.c.project_id, networks.c.deleted),
        Index('networks_vlan_deleted_idx',
              networks.c.vlan, networks.c.deleted),
        # project_user_quotas
        Index('project_user_quotas_project_id_deleted_idx',
              project_user_quotas.c.project_id,
              project_user_quotas.c.deleted),
        Index('project_user_quotas_user_id_deleted_idx',
              project_user_quotas.c.user_id, project_user_quotas.c.deleted),
        # reservations
        Index('ix_reservations_project_id', reservations.c.project_id),
        Index('ix_reservations_user_id_deleted',
              reservations.c.user_id, reservations.c.deleted),
        Index('reservations_uuid_idx', reservations.c.uuid),
        # security_group_instance_association
        Index('security_group_instance_association_instance_uuid_idx',
              security_group_instance_association.c.instance_uuid),
        # task_log
        Index('ix_task_log_period_beginning', task_log.c.period_beginning),
        Index('ix_task_log_host', task_log.c.host),
        Index('ix_task_log_period_ending', task_log.c.period_ending),
        # quota_classes
        Index('ix_quota_classes_class_name', quota_classes.c.class_name),
        # quota_usages
        Index('ix_quota_usages_project_id', quota_usages.c.project_id),
        Index('ix_quota_usages_user_id_deleted',
              quota_usages.c.user_id, quota_usages.c.deleted),
        # volumes
        Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
    ]
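
    # NOTE: illustrative aside, not part of the original migration. The raw
    # SQL built below exists because MySQL/InnoDB caps the total byte length
    # of an index key, and with utf8 each character can occupy 3 bytes, so
    # indexing five String(255) columns in full would blow past that cap.
    # Rough arithmetic (exact limits vary with MySQL version and row format,
    # so treat the numbers as indicative only):
    #
    #     full columns:    ~5 * 255 chars * 3 bytes = 3825 bytes  (too big)
    #     100-char prefix:  4 * 100 * 3 + 255 * 3   = 1965 bytes  (fits)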
    # MySQL specific indexes
    if migrate_engine.name == 'mysql':
        for index in mysql_pre_indexes:
            index.create(migrate_engine)

        # mysql-specific index by leftmost 100 chars.  (mysql gets angry if
        # the index key length is too long.)
        sql = ("create index migrations_by_host_nodes_and_status_idx ON "
               "migrations (deleted, source_compute(100), dest_compute(100), "
               "source_node(100), dest_node(100), status)")
        migrate_engine.execute(sql)

    # PostgreSQL specific indexes
    if migrate_engine.name == 'postgresql':
        Index('address', fixed_ips.c.address).create()

    # NOTE(dprince): PostgreSQL doesn't allow duplicate indexes
    # so we skip creation of select indexes (so schemas match exactly).
    POSTGRES_INDEX_SKIPS = [
        # See Havana migration 186_new_bdm_format where we dropped the
        # virtual_name column.
        # IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
        'block_device_mapping_instance_uuid_virtual_name_device_name_idx'
    ]

    MYSQL_INDEX_SKIPS = [
        # we create this one manually for MySQL above
        'migrations_by_host_nodes_and_status_idx'
    ]

    for index in common_indexes:
        if migrate_engine.name == 'postgresql' and \
                index.name in POSTGRES_INDEX_SKIPS:
            continue
        if migrate_engine.name == 'mysql' and \
                index.name in MYSQL_INDEX_SKIPS:
            continue
        else:
            index.create(migrate_engine)

    Index('project_id', dns_domains.c.project_id).drop

    fkeys = [
        [[fixed_ips.c.instance_uuid],
            [instances.c.uuid],
            'fixed_ips_instance_uuid_fkey'],
        [[block_device_mapping.c.instance_uuid],
            [instances.c.uuid],
            'block_device_mapping_instance_uuid_fkey'],
        [[instance_info_caches.c.instance_uuid],
            [instances.c.uuid],
            'instance_info_caches_instance_uuid_fkey'],
        [[instance_metadata.c.instance_uuid],
            [instances.c.uuid],
            'instance_metadata_instance_uuid_fkey'],
        [[instance_system_metadata.c.instance_uuid],
            [instances.c.uuid],
            'instance_system_metadata_ibfk_1'],
        [[instance_type_projects.c.instance_type_id],
            [instance_types.c.id],
            'instance_type_projects_ibfk_1'],
        [[iscsi_targets.c.volume_id],
            [volumes.c.id],
            'iscsi_targets_volume_id_fkey'],
        [[reservations.c.usage_id],
            [quota_usages.c.id],
            'reservations_ibfk_1'],
        [[security_group_instance_association.c.instance_uuid],
            [instances.c.uuid],
            'security_group_instance_association_instance_uuid_fkey'],
        [[security_group_instance_association.c.security_group_id],
            [security_groups.c.id],
            'security_group_instance_association_ibfk_1'],
        [[virtual_interfaces.c.instance_uuid],
            [instances.c.uuid],
            'virtual_interfaces_instance_uuid_fkey'],
        [[compute_node_stats.c.compute_node_id],
            [compute_nodes.c.id],
            'fk_compute_node_stats_compute_node_id'],
        [[compute_nodes.c.service_id],
            [services.c.id],
            'fk_compute_nodes_service_id'],
        [[instance_actions.c.instance_uuid],
            [instances.c.uuid],
            'fk_instance_actions_instance_uuid'],
        [[instance_faults.c.instance_uuid],
            [instances.c.uuid],
            'fk_instance_faults_instance_uuid'],
        [[migrations.c.instance_uuid],
            [instances.c.uuid],
            'fk_migrations_instance_uuid'],
    ]

    for fkey_pair in fkeys:
        if migrate_engine.name == 'mysql':
            # For MySQL we name our fkeys explicitly so they match Havana
            fkey = ForeignKeyConstraint(columns=fkey_pair[0],
                                        refcolumns=fkey_pair[1],
                                        name=fkey_pair[2])
            fkey.create()
        elif migrate_engine.name == 'postgresql':
            # PostgreSQL names things like it wants (correct and compatible!)
            fkey = ForeignKeyConstraint(columns=fkey_pair[0],
                                        refcolumns=fkey_pair[1])
            fkey.create()

    if migrate_engine.name == "mysql":
        # In Folsom we explicitly converted migrate_version to UTF8.
        sql = "ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8;"
        # Set default DB charset to UTF8.
        sql += "ALTER DATABASE %s DEFAULT CHARACTER SET utf8;" % \
            migrate_engine.url.database
        migrate_engine.execute(sql)

    _create_shadow_tables(migrate_engine)

    # populate initial instance types
    _populate_instance_types(instance_types)

    _create_dump_tables(migrate_engine)


def downgrade(migrate_engine):
    raise NotImplementedError('Downgrade from Havana is unsupported.')
././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/227_fix_project_user_quotas_resource_length.py
nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/227_fix_project_user_quotas_resource_length.p0000664000567000056700000000277012540642543035072 0ustar  jenkinsjenkins00000000000000# Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import MetaData, String, Table


def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    table = Table('project_user_quotas', meta, autoload=True)
    col_resource = getattr(table.c, 'resource')
    if col_resource.type.length == 25:
        # The resource of project_user_quotas table had been changed to
        # invalid length(25) since I56ad98d3702f53fe8cfa94093fea89074f7a5e90.
        # The following code fixes the length for the environments which are
        # deployed after I56ad98d3702f53fe8cfa94093fea89074f7a5e90.
        col_resource.alter(type=String(255))
        table.update().where(table.c.resource == 'injected_file_content_byt')\
            .values(resource='injected_file_content_bytes').execute()


def downgrade(migrate_engine):
    # This migration fixes the resource of project_user_quotas table.
    # No need to go back and reverse this change.
    pass
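# NOTE: illustrative aside on the migration above, not a file from the
# original tree. The broken column was String(25), which silently truncated
# the longest quota resource name; the arithmetic matches the literal that
# the UPDATE statement repairs:
#
#     >>> len('injected_file_content_bytes')
#     27
#     >>> 'injected_file_content_bytes'[:25]
#     'injected_file_content_byt'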
nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/234_add_expire_reservations_index.py0000664000567000056700000000361012540642543033134 0ustar  jenkinsjenkins00000000000000# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy import Index, MetaData, Table

from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def _get_deleted_expire_index(table):
    members = sorted(['deleted', 'expire'])
    for idx in table.indexes:
        if sorted(idx.columns.keys()) == members:
            return idx


def upgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    reservations = Table('reservations', meta, autoload=True)
    if _get_deleted_expire_index(reservations):
        LOG.info(_('Skipped adding reservations_deleted_expire_idx '
                   'because an equivalent index already exists.'))
        return

    # Based on expire_reservations query
    # from: nova/db/sqlalchemy/api.py
    index = Index('reservations_deleted_expire_idx',
                  reservations.c.deleted, reservations.c.expire)
    index.create(migrate_engine)


def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine
    reservations = Table('reservations', meta, autoload=True)
    index = _get_deleted_expire_index(reservations)
    if index:
        index.drop(migrate_engine)
    else:
        LOG.info(_('Skipped removing reservations_deleted_expire_idx '
                   'because index does not exist.'))
nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/versions/220_placeholder.py0000664000567000056700000000165112540642543027315 0ustar  jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This is a placeholder for Havana backports.
# Do not use this number for new Icehouse work.  New Icehouse work starts
# after all the placeholders.
#
# See blueprint backportable-db-migrations-icehouse
# http://lists.openstack.org/pipermail/openstack-dev/2013-March/006827.html


def upgrade(migrate_engine):
    pass


def downgrade(migration_engine):
    pass
nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/README0000664000567000056700000000015312540642532023100 0ustar  jenkinsjenkins00000000000000This is a database migration repository.

More information at
http://code.google.com/p/sqlalchemy-migrate/
nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/__init__.py0000664000567000056700000000114412540642543024334 0ustar  jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
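# NOTE: illustrative aside, not a file from the original tree. manage.py
# below is the standard sqlalchemy-migrate shell entry point with the
# repository pre-bound to '.'; assuming stock sqlalchemy-migrate commands,
# a deployment could be inspected or moved between schema versions roughly
# like this:
#
#     python manage.py version                # latest revision in the repo
#     python manage.py db_version <db_url>    # revision the database is at
#     python manage.py upgrade <db_url>       # apply pending migrations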
nova-2014.1.5/nova/db/sqlalchemy/migrate_repo/manage.py0000664000567000056700000000135212540642532024024 0ustar  jenkinsjenkins00000000000000#!/usr/bin/env python
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate.versioning.shell import main

if __name__ == '__main__':
    main(debug='False', repository='.')
nova-2014.1.5/nova/db/sqlalchemy/models.py0000664000567000056700000014432312540642543021412 0ustar  jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
SQLAlchemy models for nova data.
"""

from sqlalchemy import (Column, Index, Integer, BigInteger, Enum, String,
                        schema)
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.orm import relationship, backref, object_mapper

from oslo.config import cfg

from nova.db.sqlalchemy import types
from nova.openstack.common.db.sqlalchemy import models
from nova.openstack.common import timeutils

CONF = cfg.CONF
BASE = declarative_base()


def MediumText():
    return Text().with_variant(MEDIUMTEXT(), 'mysql')


class NovaBase(models.SoftDeleteMixin,
               models.TimestampMixin,
               models.ModelBase):
    metadata = None

    def save(self, session=None):
        from nova.db.sqlalchemy import api

        if session is None:
            session = api.get_session()

        super(NovaBase, self).save(session=session)
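
# NOTE: illustrative aside, not part of the original module. The models
# below inherit SoftDeleteMixin and TimestampMixin via NovaBase, so rows
# are timestamped automatically and "deleted" by flagging rather than by a
# SQL DELETE. A minimal usage sketch under those assumptions:
#
#     service = Service(host='compute1', binary='nova-compute',
#                       topic='compute', report_count=0)
#     service.save()                # grabs a session from api.get_session()
#     service.soft_delete(session)  # sets deleted/deleted_at, keeps the row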

class Service(BASE, NovaBase):
    """Represents a running service on a host."""

    __tablename__ = 'services'
    __table_args__ = (
        schema.UniqueConstraint("host", "topic", "deleted",
                                name="uniq_services0host0topic0deleted"),
        schema.UniqueConstraint("host", "binary", "deleted",
                                name="uniq_services0host0binary0deleted")
    )

    id = Column(Integer, primary_key=True)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    binary = Column(String(255))
    topic = Column(String(255))
    report_count = Column(Integer, nullable=False, default=0)
    disabled = Column(Boolean, default=False)
    disabled_reason = Column(String(255))


class ComputeNode(BASE, NovaBase):
    """Represents a running compute service on a host."""

    __tablename__ = 'compute_nodes'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)
    service_id = Column(Integer, ForeignKey('services.id'), nullable=False)
    service = relationship(Service,
                           backref=backref('compute_node'),
                           foreign_keys=service_id,
                           primaryjoin='and_('
                                       'ComputeNode.service_id == Service.id,'
                                       'ComputeNode.deleted == 0)')

    vcpus = Column(Integer, nullable=False)
    memory_mb = Column(Integer, nullable=False)
    local_gb = Column(Integer, nullable=False)
    vcpus_used = Column(Integer, nullable=False)
    memory_mb_used = Column(Integer, nullable=False)
    local_gb_used = Column(Integer, nullable=False)
    hypervisor_type = Column(MediumText(), nullable=False)
    hypervisor_version = Column(Integer, nullable=False)
    hypervisor_hostname = Column(String(255))

    # Free Ram, amount of activity (resize, migration, boot, etc) and
    # the number of running VM's are a good starting point for what's
    # important when making scheduling decisions.
    free_ram_mb = Column(Integer)
    free_disk_gb = Column(Integer)
    current_workload = Column(Integer)
    running_vms = Column(Integer)

    # Note(masumotok): Expected Strings example:
    #
    # '{"arch":"x86_64",
    #   "model":"Nehalem",
    #   "topology":{"sockets":1, "threads":2, "cores":3},
    #   "features":["tdtscp", "xtpr"]}'
    #
    # Points are "json translatable" and it must have all dictionary keys
    # above, since it is copied from <cpu> tag of getCapabilities()
    # (See libvirt.virtConnection).
    cpu_info = Column(MediumText(), nullable=False)
    disk_available_least = Column(Integer)
    host_ip = Column(types.IPAddress())
    supported_instances = Column(Text)
    metrics = Column(Text)

    # Note(yongli): json string PCI Stats
    # '{"vendor_id":"8086", "product_id":"1234", "count":3 }'
    pci_stats = Column(Text)

    # extra_resources is a json string containing arbitrary
    # data about additional resources.
    extra_resources = Column(Text)

    # json-encode string containing compute node statistics
    stats = Column(Text, default='{}')


class Certificate(BASE, NovaBase):
    """Represents an x509 certificate."""
    __tablename__ = 'certificates'
    __table_args__ = (
        Index('certificates_project_id_deleted_idx',
              'project_id', 'deleted'),
        Index('certificates_user_id_deleted_idx', 'user_id', 'deleted')
    )
    id = Column(Integer, primary_key=True)

    user_id = Column(String(255))
    project_id = Column(String(255))
    file_name = Column(String(255))


class Instance(BASE, NovaBase):
    """Represents a guest VM."""
    __tablename__ = 'instances'
    __table_args__ = (
        Index('uuid', 'uuid', unique=True),
        Index('project_id', 'project_id'),
        Index('instances_host_deleted_idx', 'host', 'deleted'),
        Index('instances_reservation_id_idx', 'reservation_id'),
        Index('instances_terminated_at_launched_at_idx',
              'terminated_at', 'launched_at'),
        Index('instances_uuid_deleted_idx', 'uuid', 'deleted'),
        Index('instances_task_state_updated_at_idx',
              'task_state', 'updated_at'),
        Index('instances_host_node_deleted_idx', 'host', 'node', 'deleted'),
        Index('instances_host_deleted_cleaned_idx',
              'host', 'deleted', 'cleaned'),
    )
    injected_files = []

    id = Column(Integer, primary_key=True, autoincrement=True)

    @property
    def name(self):
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for column in iter(object_mapper(self).columns):
                key = column.name
                # prevent recursion if someone specifies %(name)s
                # %(name)s will not be valid.
                if key == 'name':
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    @property
    def _extra_keys(self):
        return ['name']

    user_id = Column(String(255))
    project_id = Column(String(255))

    image_ref = Column(String(255))
    kernel_id = Column(String(255))
    ramdisk_id = Column(String(255))
    hostname = Column(String(255))

    launch_index = Column(Integer)
    key_name = Column(String(255))
    key_data = Column(MediumText())

    power_state = Column(Integer)
    vm_state = Column(String(255))
    task_state = Column(String(255))

    memory_mb = Column(Integer)
    vcpus = Column(Integer)
    root_gb = Column(Integer)
    ephemeral_gb = Column(Integer)
    ephemeral_key_uuid = Column(String(36))

    # This is not related to hostname, above.  It refers
    # to the nova node.
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    # To identify the "ComputeNode" which the instance resides in.
    # This equals to ComputeNode.hypervisor_hostname.
    node = Column(String(255))

    # *not* flavorid, this is the internal primary_key
    instance_type_id = Column(Integer)

    user_data = Column(MediumText())

    reservation_id = Column(String(255))

    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)

    availability_zone = Column(String(255))

    # User editable field for display in user-facing UIs
    display_name = Column(String(255))
    display_description = Column(String(255))

    # To remember on which host an instance booted.
    # An instance may have moved to another host by live migration.
    launched_on = Column(MediumText())

    # NOTE(jdillaman): locked deprecated in favor of locked_by,
    # to be removed in Icehouse
    locked = Column(Boolean)
    locked_by = Column(Enum('owner', 'admin'))

    os_type = Column(String(255))
    architecture = Column(String(255))
    vm_mode = Column(String(255))
    uuid = Column(String(36))

    root_device_name = Column(String(255))
    default_ephemeral_device = Column(String(255))
    default_swap_device = Column(String(255))
    config_drive = Column(String(255))

    # User editable field meant to represent what ip should be used
    # to connect to the instance
    access_ip_v4 = Column(types.IPAddress())
    access_ip_v6 = Column(types.IPAddress())

    auto_disk_config = Column(Boolean())
    progress = Column(Integer)

    # EC2 instance_initiated_shutdown_terminate
    # True: -> 'terminate'
    # False: -> 'stop'
    # Note(maoy): currently Nova will always stop instead of terminate
    # no matter what the flag says. So we set the default to False.
    shutdown_terminate = Column(Boolean(), default=False)

    # EC2 disable_api_termination
    disable_terminate = Column(Boolean(), default=False)

    # OpenStack compute cell name.  This will only be set at the top of
    # the cells tree and it'll be a full cell name such as 'api!hop1!hop2'
    cell_name = Column(String(255))
    internal_id = Column(Integer)

    # Records whether an instance has been deleted from disk
    cleaned = Column(Integer, default=0)
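
# NOTE: illustrative aside, not part of the original module. The name
# property above renders CONF.instance_name_template three ways (the values
# below are assumed examples):
#
#     'instance-%08x' % 42       -> 'instance-0000002a'      (id-based)
#     'uuid-%(uuid)s' % info     -> 'uuid-<uuid>'            (TypeError path)
#     template using %(name)s    -> falls back to self.uuid  (KeyError path)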

class InstanceInfoCache(BASE, NovaBase):
    """Represents a cache of information about an instance
    """
    __tablename__ = 'instance_info_caches'
    __table_args__ = (
        schema.UniqueConstraint(
            "instance_uuid",
            name="uniq_instance_info_caches0instance_uuid"),)
    id = Column(Integer, primary_key=True, autoincrement=True)

    # text column used for storing a json object of network data for api
    network_info = Column(MediumText())

    instance_uuid = Column(String(36), ForeignKey('instances.uuid'),
                           nullable=False)
    instance = relationship(Instance,
                            backref=backref('info_cache', uselist=False),
                            foreign_keys=instance_uuid,
                            primaryjoin=instance_uuid == Instance.uuid)


class InstanceTypes(BASE, NovaBase):
    """Represents possible flavors for instances.

    Note: instance_type and flavor are synonyms and the term
    instance_type is deprecated and in the process of being removed.
    """
    __tablename__ = "instance_types"

    __table_args__ = (
        schema.UniqueConstraint("flavorid", "deleted",
                                name="uniq_instance_types0flavorid0deleted"),
        schema.UniqueConstraint("name", "deleted",
                                name="uniq_instance_types0name0deleted")
    )

    # Internal only primary key/id
    id = Column(Integer, primary_key=True)
    name = Column(String(255))
    memory_mb = Column(Integer, nullable=False)
    vcpus = Column(Integer, nullable=False)
    root_gb = Column(Integer)
    ephemeral_gb = Column(Integer)
    # Public facing id will be renamed public_id
    flavorid = Column(String(255))
    swap = Column(Integer, nullable=False, default=0)
    rxtx_factor = Column(Float, default=1)
    vcpu_weight = Column(Integer)
    disabled = Column(Boolean, default=False)
    is_public = Column(Boolean, default=True)


class Volume(BASE, NovaBase):
    """Represents a block storage device that can be attached to a VM."""
    __tablename__ = 'volumes'
    __table_args__ = (
        Index('volumes_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(String(36), primary_key=True, nullable=False)
    deleted = Column(String(36), default="")

    @property
    def name(self):
        return CONF.volume_name_template % self.id

    ec2_id = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))

    snapshot_id = Column(String(36))

    host = Column(String(255))
    size = Column(Integer)
    availability_zone = Column(String(255))
    instance_uuid = Column(String(36))
    mountpoint = Column(String(255))
    attach_time = Column(DateTime)
    status = Column(String(255))  # TODO(vish): enum?
    attach_status = Column(String(255))  # TODO(vish): enum

    scheduled_at = Column(DateTime)
    launched_at = Column(DateTime)
    terminated_at = Column(DateTime)

    display_name = Column(String(255))
    display_description = Column(String(255))

    provider_location = Column(String(256))
    provider_auth = Column(String(256))

    volume_type_id = Column(Integer)


class Quota(BASE, NovaBase):
    """Represents a single quota override for a project.

    If there is no row for a given project id and resource, then the
    default for the quota class is used.  If there is no row for a
    given quota class and resource, then the default for the
    deployment is used.  If the row is present but the hard limit is
    Null, then the resource is unlimited.
    """
    __tablename__ = 'quotas'
    __table_args__ = (
        schema.UniqueConstraint("project_id", "resource", "deleted",
                                name="uniq_quotas0project_id0resource0deleted"
                                ),
    )
    id = Column(Integer, primary_key=True)

    project_id = Column(String(255))

    resource = Column(String(255), nullable=False)
    hard_limit = Column(Integer)
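
# NOTE: illustrative aside, not part of the original module. Per the Quota
# docstring above, a limit is resolved as: project override (quotas row) ->
# quota-class default (quota_classes row) -> deployment default, with a
# present row whose hard_limit is NULL meaning unlimited. A sketch of that
# lookup order with a hypothetical helper, not nova's actual API:
#
#     def effective_limit(project_row, class_row, deployment_default):
#         for row in (project_row, class_row):
#             if row is not None:
#                 return row.hard_limit   # may be None -> unlimited
#         return deployment_default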
""" __tablename__ = 'quotas' __table_args__ = ( schema.UniqueConstraint("project_id", "resource", "deleted", name="uniq_quotas0project_id0resource0deleted" ), ) id = Column(Integer, primary_key=True) project_id = Column(String(255)) resource = Column(String(255), nullable=False) hard_limit = Column(Integer) class ProjectUserQuota(BASE, NovaBase): """Represents a single quota override for a user with in a project.""" __tablename__ = 'project_user_quotas' uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted" __table_args__ = ( schema.UniqueConstraint("user_id", "project_id", "resource", "deleted", name=uniq_name), Index('project_user_quotas_project_id_deleted_idx', 'project_id', 'deleted'), Index('project_user_quotas_user_id_deleted_idx', 'user_id', 'deleted') ) id = Column(Integer, primary_key=True, nullable=False) project_id = Column(String(255), nullable=False) user_id = Column(String(255), nullable=False) resource = Column(String(255), nullable=False) hard_limit = Column(Integer) class QuotaClass(BASE, NovaBase): """Represents a single quota override for a quota class. If there is no row for a given quota class and resource, then the default for the deployment is used. If the row is present but the hard limit is Null, then the resource is unlimited. """ __tablename__ = 'quota_classes' __table_args__ = ( Index('ix_quota_classes_class_name', 'class_name'), ) id = Column(Integer, primary_key=True) class_name = Column(String(255)) resource = Column(String(255)) hard_limit = Column(Integer) class QuotaUsage(BASE, NovaBase): """Represents the current usage for a given resource.""" __tablename__ = 'quota_usages' __table_args__ = ( Index('ix_quota_usages_project_id', 'project_id'), ) id = Column(Integer, primary_key=True) project_id = Column(String(255)) user_id = Column(String(255)) resource = Column(String(255), nullable=False) in_use = Column(Integer, nullable=False) reserved = Column(Integer, nullable=False) @property def total(self): return self.in_use + self.reserved until_refresh = Column(Integer) class Reservation(BASE, NovaBase): """Represents a resource reservation for quotas.""" __tablename__ = 'reservations' __table_args__ = ( Index('ix_reservations_project_id', 'project_id'), Index('reservations_uuid_idx', 'uuid'), ) id = Column(Integer, primary_key=True, nullable=False) uuid = Column(String(36), nullable=False) usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False) project_id = Column(String(255)) user_id = Column(String(255)) resource = Column(String(255)) delta = Column(Integer, nullable=False) expire = Column(DateTime) usage = relationship( "QuotaUsage", foreign_keys=usage_id, primaryjoin='and_(Reservation.usage_id == QuotaUsage.id,' 'QuotaUsage.deleted == 0)') class Snapshot(BASE, NovaBase): """Represents a block storage device that can be attached to a VM.""" __tablename__ = 'snapshots' __table_args__ = () id = Column(String(36), primary_key=True, nullable=False) deleted = Column(String(36), default="") @property def name(self): return CONF.snapshot_name_template % self.id @property def volume_name(self): return CONF.volume_name_template % self.volume_id user_id = Column(String(255)) project_id = Column(String(255)) volume_id = Column(String(36), nullable=False) status = Column(String(255)) progress = Column(String(255)) volume_size = Column(Integer) scheduled_at = Column(DateTime) display_name = Column(String(255)) display_description = Column(String(255)) class BlockDeviceMapping(BASE, NovaBase): """Represents block device mapping that 
is defined by EC2.""" __tablename__ = "block_device_mapping" __table_args__ = ( Index('snapshot_id', 'snapshot_id'), Index('volume_id', 'volume_id'), Index('block_device_mapping_instance_uuid_device_name_idx', 'instance_uuid', 'device_name'), Index('block_device_mapping_instance_uuid_volume_id_idx', 'instance_uuid', 'volume_id'), Index('block_device_mapping_instance_uuid_idx', 'instance_uuid'), #TODO(sshturm) Should be dropped. `virtual_name` was dropped #in 186 migration, #Duplicates `block_device_mapping_instance_uuid_device_name_idx` index. Index("block_device_mapping_instance_uuid_virtual_name" "_device_name_idx", 'instance_uuid', 'device_name'), ) id = Column(Integer, primary_key=True, autoincrement=True) instance_uuid = Column(String(36), ForeignKey('instances.uuid')) instance = relationship(Instance, backref=backref('block_device_mapping'), foreign_keys=instance_uuid, primaryjoin='and_(BlockDeviceMapping.' 'instance_uuid==' 'Instance.uuid,' 'BlockDeviceMapping.deleted==' '0)') source_type = Column(String(255)) destination_type = Column(String(255)) guest_format = Column(String(255)) device_type = Column(String(255)) disk_bus = Column(String(255)) boot_index = Column(Integer) device_name = Column(String(255)) # default=False for compatibility of the existing code. # With EC2 API, # default True for ami specified device. # default False for created with other timing. #TODO(sshturm) add default in db delete_on_termination = Column(Boolean, default=False) snapshot_id = Column(String(36)) volume_id = Column(String(36)) volume_size = Column(Integer) image_id = Column(String(36)) # for no device to suppress devices. no_device = Column(Boolean) connection_info = Column(MediumText()) class IscsiTarget(BASE, NovaBase): """Represents an iscsi target for a given host.""" __tablename__ = 'iscsi_targets' __table_args__ = ( Index('iscsi_targets_volume_id_fkey', 'volume_id'), Index('iscsi_targets_host_idx', 'host'), Index('iscsi_targets_host_volume_id_deleted_idx', 'host', 'volume_id', 'deleted') ) id = Column(Integer, primary_key=True, nullable=False) target_num = Column(Integer) host = Column(String(255)) volume_id = Column(String(36), ForeignKey('volumes.id')) volume = relationship(Volume, backref=backref('iscsi_target', uselist=False), foreign_keys=volume_id, primaryjoin='and_(IscsiTarget.volume_id==Volume.id,' 'IscsiTarget.deleted==0)') class SecurityGroupInstanceAssociation(BASE, NovaBase): __tablename__ = 'security_group_instance_association' __table_args__ = ( Index('security_group_instance_association_instance_uuid_idx', 'instance_uuid'), ) id = Column(Integer, primary_key=True, nullable=False) security_group_id = Column(Integer, ForeignKey('security_groups.id')) instance_uuid = Column(String(36), ForeignKey('instances.uuid')) class SecurityGroup(BASE, NovaBase): """Represents a security group.""" __tablename__ = 'security_groups' __table_args__ = ( Index('uniq_security_groups0project_id0name0deleted', 'project_id', 'name', 'deleted'), ) id = Column(Integer, primary_key=True) name = Column(String(255)) description = Column(String(255)) user_id = Column(String(255)) project_id = Column(String(255)) instances = relationship(Instance, secondary="security_group_instance_association", primaryjoin='and_(' 'SecurityGroup.id == ' 'SecurityGroupInstanceAssociation.security_group_id,' 'SecurityGroupInstanceAssociation.deleted == 0,' 'SecurityGroup.deleted == 0)', secondaryjoin='and_(' 'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,' # (anthony) the condition below shouldn't be 
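
# NOTE: illustrative aside, not part of the original module. The association
# table above backs the SecurityGroup.instances many-to-many relationship
# below; because rows are only ever soft-deleted, every join condition in
# that relationship filters on deleted == 0 so detached instances and
# removed groups drop out of the join without any row being physically
# deleted. A sketch of the effective SQL (assumed, simplified):
#
#     SELECT i.* FROM instances i
#     JOIN security_group_instance_association a
#       ON a.instance_uuid = i.uuid AND a.deleted = 0
#     WHERE a.security_group_id = :sg_id AND i.deleted = 0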

class SecurityGroup(BASE, NovaBase):
    """Represents a security group."""
    __tablename__ = 'security_groups'
    __table_args__ = (
        Index('uniq_security_groups0project_id0name0deleted',
              'project_id', 'name', 'deleted'),
    )
    id = Column(Integer, primary_key=True)

    name = Column(String(255))
    description = Column(String(255))
    user_id = Column(String(255))
    project_id = Column(String(255))

    instances = relationship(Instance,
                             secondary="security_group_instance_association",
                             primaryjoin='and_('
        'SecurityGroup.id == '
        'SecurityGroupInstanceAssociation.security_group_id,'
        'SecurityGroupInstanceAssociation.deleted == 0,'
        'SecurityGroup.deleted == 0)',
                             secondaryjoin='and_('
        'SecurityGroupInstanceAssociation.instance_uuid == Instance.uuid,'
        # (anthony) the condition below shouldn't be necessary now that the
        # association is being marked as deleted.  However, removing this
        # may cause existing deployments to choke, so I'm leaving it
        'Instance.deleted == 0)',
                             backref='security_groups')


class SecurityGroupIngressRule(BASE, NovaBase):
    """Represents a rule in a security group."""
    __tablename__ = 'security_group_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)

    parent_group_id = Column(Integer, ForeignKey('security_groups.id'))
    parent_group = relationship("SecurityGroup", backref="rules",
                                foreign_keys=parent_group_id,
                                primaryjoin='and_('
        'SecurityGroupIngressRule.parent_group_id == SecurityGroup.id,'
        'SecurityGroupIngressRule.deleted == 0)')

    protocol = Column(String(255))
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())

    # Note: This is not the parent SecurityGroup. It's SecurityGroup we're
    # granting access for.
    group_id = Column(Integer, ForeignKey('security_groups.id'))
    grantee_group = relationship("SecurityGroup",
                                 foreign_keys=group_id,
                                 primaryjoin='and_('
        'SecurityGroupIngressRule.group_id == SecurityGroup.id,'
        'SecurityGroupIngressRule.deleted == 0)')


class SecurityGroupIngressDefaultRule(BASE, NovaBase):
    __tablename__ = 'security_group_default_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    protocol = Column(String(5))  # "tcp", "udp" or "icmp"
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())


class ProviderFirewallRule(BASE, NovaBase):
    """Represents a rule in a security group."""
    __tablename__ = 'provider_fw_rules'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)

    protocol = Column(String(5))  # "tcp", "udp", or "icmp"
    from_port = Column(Integer)
    to_port = Column(Integer)
    cidr = Column(types.CIDR())


class KeyPair(BASE, NovaBase):
    """Represents a public key pair for ssh."""
    __tablename__ = 'key_pairs'
    __table_args__ = (
        schema.UniqueConstraint("user_id", "name", "deleted",
                                name="uniq_key_pairs0user_id0name0deleted"),
    )
    id = Column(Integer, primary_key=True, nullable=False)

    name = Column(String(255))

    user_id = Column(String(255))

    fingerprint = Column(String(255))
    public_key = Column(MediumText())


class Migration(BASE, NovaBase):
    """Represents a running host-to-host migration."""
    __tablename__ = 'migrations'
    __table_args__ = (
        Index('migrations_instance_uuid_and_status_idx', 'instance_uuid',
              'status'),
        Index('migrations_by_host_nodes_and_status_idx', 'deleted',
              'source_compute', 'dest_compute', 'source_node', 'dest_node',
              'status'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    # NOTE(tr3buchet): the ____compute variables are instance['host']
    source_compute = Column(String(255))
    dest_compute = Column(String(255))
    # nodes are equivalent to a compute node's 'hypervisor_hostname'
    source_node = Column(String(255))
    dest_node = Column(String(255))
    # NOTE(tr3buchet): dest_host, btw, is an ip address
    dest_host = Column(String(255))
    old_instance_type_id = Column(Integer())
    new_instance_type_id = Column(Integer())
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    #TODO(_cerberus_): enum
    status = Column(String(255))

    instance = relationship("Instance", foreign_keys=instance_uuid,
                            primaryjoin='and_(Migration.instance_uuid == '
                                        'Instance.uuid, Instance.deleted == '
                                        '0)')


class Network(BASE, NovaBase):
    """Represents a network."""
    __tablename__ = 'networks'
    __table_args__ = (
        schema.UniqueConstraint("vlan", "deleted",
                                name="uniq_networks0vlan0deleted"),
        Index('networks_bridge_deleted_idx', 'bridge', 'deleted'),
        Index('networks_host_idx', 'host'),
        Index('networks_project_id_deleted_idx', 'project_id', 'deleted'),
        Index('networks_uuid_project_id_deleted_idx',
              'uuid', 'project_id', 'deleted'),
        Index('networks_vlan_deleted_idx', 'vlan', 'deleted'),
        Index('networks_cidr_v6_idx', 'cidr_v6')
    )
    id = Column(Integer, primary_key=True, nullable=False)
    label = Column(String(255))

    injected = Column(Boolean, default=False)
    cidr = Column(types.CIDR())
    cidr_v6 = Column(types.CIDR())
    multi_host = Column(Boolean, default=False)

    gateway_v6 = Column(types.IPAddress())
    netmask_v6 = Column(types.IPAddress())
    netmask = Column(types.IPAddress())
    bridge = Column(String(255))
    bridge_interface = Column(String(255))
    gateway = Column(types.IPAddress())
    broadcast = Column(types.IPAddress())
    dns1 = Column(types.IPAddress())
    dns2 = Column(types.IPAddress())

    vlan = Column(Integer)
    vpn_public_address = Column(types.IPAddress())
    vpn_public_port = Column(Integer)
    vpn_private_address = Column(types.IPAddress())
    dhcp_start = Column(types.IPAddress())

    rxtx_base = Column(Integer)

    project_id = Column(String(255))
    priority = Column(Integer)
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    uuid = Column(String(36))


class VirtualInterface(BASE, NovaBase):
    """Represents a virtual interface on an instance."""
    __tablename__ = 'virtual_interfaces'
    __table_args__ = (
        schema.UniqueConstraint(
            "address", "deleted",
            name="uniq_virtual_interfaces0address0deleted"),
        Index('network_id', 'network_id'),
        Index('virtual_interfaces_instance_uuid_fkey', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    address = Column(String(255))
    network_id = Column(Integer)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    uuid = Column(String(36))


# TODO(vish): can these both come from the same baseclass?
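
# NOTE: illustrative aside, not part of the original module. One answer to
# the TODO above: the shared shape of FixedIp and FloatingIp could be
# hoisted into a mixin. Entirely hypothetical sketch, not nova code:
#
#     class IpBase(object):
#         id = Column(Integer, primary_key=True)
#         address = Column(types.IPAddress())
#         host = Column(String(255))
#
# Both classes below would then inherit it and add only their own columns,
# relationships, and soft-delete-aware join conditions.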
class FixedIp(BASE, NovaBase):
    """Represents a fixed ip for an instance."""
    __tablename__ = 'fixed_ips'
    __table_args__ = (
        schema.UniqueConstraint(
            "address", "deleted", name="uniq_fixed_ips0address0deleted"),
        Index('fixed_ips_virtual_interface_id_fkey',
              'virtual_interface_id'),
        Index('network_id', 'network_id'),
        Index('address', 'address'),
        Index('fixed_ips_instance_uuid_fkey', 'instance_uuid'),
        Index('fixed_ips_host_idx', 'host'),
        Index('fixed_ips_network_id_host_deleted_idx', 'network_id', 'host',
              'deleted'),
        Index('fixed_ips_address_reserved_network_id_deleted_idx',
              'address', 'reserved', 'network_id', 'deleted'),
        Index('fixed_ips_deleted_allocated_idx', 'address', 'deleted',
              'allocated')
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    network_id = Column(Integer)
    virtual_interface_id = Column(Integer)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    # associated means that a fixed_ip has its instance_id column set
    # allocated means that a fixed_ip has its virtual_interface_id column set
    #TODO(sshturm) add default in db
    allocated = Column(Boolean, default=False)
    # leased means dhcp bridge has leased the ip
    #TODO(sshturm) add default in db
    leased = Column(Boolean, default=False)
    #TODO(sshturm) add default in db
    reserved = Column(Boolean, default=False)
    host = Column(String(255))
    network = relationship(Network,
                           backref=backref('fixed_ips'),
                           foreign_keys=network_id,
                           primaryjoin='and_('
                                       'FixedIp.network_id == Network.id,'
                                       'FixedIp.deleted == 0,'
                                       'Network.deleted == 0)')
    instance = relationship(Instance,
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                'FixedIp.instance_uuid == Instance.uuid,'
                                'FixedIp.deleted == 0,'
                                'Instance.deleted == 0)')


class FloatingIp(BASE, NovaBase):
    """Represents a floating ip that dynamically forwards to a fixed ip."""
    __tablename__ = 'floating_ips'
    __table_args__ = (
        schema.UniqueConstraint("address", "deleted",
                                name="uniq_floating_ips0address0deleted"),
        Index('fixed_ip_id', 'fixed_ip_id'),
        Index('floating_ips_host_idx', 'host'),
        Index('floating_ips_project_id_idx', 'project_id'),
        Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
              'pool', 'deleted', 'fixed_ip_id', 'project_id')
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    fixed_ip_id = Column(Integer)
    project_id = Column(String(255))
    host = Column(String(255))  # , ForeignKey('hosts.id'))
    auto_assigned = Column(Boolean, default=False)
    #TODO(sshturm) add default in db
    pool = Column(String(255))
    interface = Column(String(255))
    fixed_ip = relationship(FixedIp,
                            backref=backref('floating_ips'),
                            foreign_keys=fixed_ip_id,
                            primaryjoin='and_('
                                'FloatingIp.fixed_ip_id == FixedIp.id,'
                                'FloatingIp.deleted == 0,'
                                'FixedIp.deleted == 0)')


class DNSDomain(BASE, NovaBase):
    """Represents a DNS domain with availability zone or project info."""
    __tablename__ = 'dns_domains'
    __table_args__ = (
        Index('project_id', 'project_id'),
        Index('dns_domains_domain_deleted_idx', 'domain', 'deleted'),
    )
    deleted = Column(Boolean, default=False)
    domain = Column(String(255), primary_key=True)
    scope = Column(String(255))
    availability_zone = Column(String(255))
    project_id = Column(String(255))


class ConsolePool(BASE, NovaBase):
    """Represents pool of consoles on the same physical node."""
    __tablename__ = 'console_pools'
    __table_args__ = (
        schema.UniqueConstraint(
            "host", "console_type", "compute_host", "deleted",
            name="uniq_console_pools0host0console_type0compute_host0deleted"),
    )
    id = Column(Integer, primary_key=True)
    address = Column(types.IPAddress())
    username = Column(String(255))
    password = Column(String(255))
    console_type = Column(String(255))
    public_hostname = Column(String(255))
    host = Column(String(255))
    compute_host = Column(String(255))


class Console(BASE, NovaBase):
    """Represents a console session for an instance."""
    __tablename__ = 'consoles'
    __table_args__ = (
        Index('consoles_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    instance_name = Column(String(255))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    password = Column(String(255))
    port = Column(Integer)
    pool_id = Column(Integer, ForeignKey('console_pools.id'))
    pool = relationship(ConsolePool, backref=backref('consoles'))


class InstanceMetadata(BASE, NovaBase):
    """Represents a user-provided metadata key/value pair for an instance."""
    __tablename__ = 'instance_metadata'
    __table_args__ = (
        Index('instance_metadata_instance_uuid_idx', 'instance_uuid'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    instance = relationship(Instance, backref="metadata",
                            foreign_keys=instance_uuid,
                            primaryjoin='and_('
                                'InstanceMetadata.instance_uuid == '
                                'Instance.uuid,'
                                'InstanceMetadata.deleted == 0)')


class InstanceSystemMetadata(BASE, NovaBase):
    """Represents a system-owned metadata key/value pair for an instance."""
    __tablename__ = 'instance_system_metadata'
    __table_args__ = ()
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255))
    instance_uuid = Column(String(36),
                           ForeignKey('instances.uuid'),
                           nullable=False)

    primary_join = ('and_(InstanceSystemMetadata.instance_uuid == '
                    'Instance.uuid, InstanceSystemMetadata.deleted == 0)')
    instance = relationship(Instance, backref="system_metadata",
                            foreign_keys=instance_uuid,
                            primaryjoin=primary_join)


class InstanceTypeProjects(BASE, NovaBase):
    """Represents projects associated with instance_types."""
    __tablename__ = "instance_type_projects"
    __table_args__ = (schema.UniqueConstraint(
        "instance_type_id", "project_id", "deleted",
        name="uniq_instance_type_projects0instance_type_id0project_id0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    project_id = Column(String(255))

    instance_type = relationship(InstanceTypes, backref="projects",
                                 foreign_keys=instance_type_id,
                                 primaryjoin='and_('
        'InstanceTypeProjects.instance_type_id == InstanceTypes.id,'
        'InstanceTypeProjects.deleted == 0)')


class InstanceTypeExtraSpecs(BASE, NovaBase):
    """Represents additional specs as key/value pairs for an instance_type."""
    __tablename__ = 'instance_type_extra_specs'
    __table_args__ = (
        Index('instance_type_extra_specs_instance_type_id_key_idx',
              'instance_type_id', 'key'),
        schema.UniqueConstraint(
            "instance_type_id", "key", "deleted",
            name=("uniq_instance_type_extra_specs0"
                  "instance_type_id0key0deleted")
        ),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
                              nullable=False)
    instance_type = relationship(InstanceTypes, backref="extra_specs",
                                 foreign_keys=instance_type_id,
                                 primaryjoin='and_('
        'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
        'InstanceTypeExtraSpecs.deleted == 0)')


class Cell(BASE, NovaBase):
    """Represents parent and child cells of this cell.

    Cells can have multiple parents and children, so there could
    be any number of entries with is_parent=True or False
    """
    __tablename__ = 'cells'
    __table_args__ = (schema.UniqueConstraint(
        "name", "deleted", name="uniq_cells0name0deleted"
        ),
    )
    id = Column(Integer, primary_key=True)
    # Name here is the 'short name' of a cell.  For instance: 'child1'
    name = Column(String(255))
    api_url = Column(String(255))

    transport_url = Column(String(255), nullable=False)

    weight_offset = Column(Float(), default=0.0)
    weight_scale = Column(Float(), default=1.0)
    is_parent = Column(Boolean())


class AggregateHost(BASE, NovaBase):
    """Represents a host that is member of an aggregate."""
    __tablename__ = 'aggregate_hosts'
    __table_args__ = (schema.UniqueConstraint(
        "host", "aggregate_id", "deleted",
        name="uniq_aggregate_hosts0host0aggregate_id0deleted"
        ),
    )
    id = Column(Integer, primary_key=True, autoincrement=True)
    host = Column(String(255))
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'),
                          nullable=False)


class AggregateMetadata(BASE, NovaBase):
    """Represents a metadata key/value pair for an aggregate."""
    __tablename__ = 'aggregate_metadata'
    __table_args__ = (
        schema.UniqueConstraint(
            "aggregate_id", "key", "deleted",
            name="uniq_aggregate_metadata0aggregate_id0key0deleted"
            ),
        Index('aggregate_metadata_key_idx', 'key'),
    )
    id = Column(Integer, primary_key=True)
    key = Column(String(255), nullable=False)
    value = Column(String(255), nullable=False)
    aggregate_id = Column(Integer, ForeignKey('aggregates.id'),
                          nullable=False)


class Aggregate(BASE, NovaBase):
    """Represents a cluster of hosts that exists in this zone."""
    __tablename__ = 'aggregates'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(255))
    _hosts = relationship(AggregateHost,
                          primaryjoin='and_('
                              'Aggregate.id == AggregateHost.aggregate_id,'
                              'AggregateHost.deleted == 0,'
                              'Aggregate.deleted == 0)')

    _metadata = relationship(AggregateMetadata,
                             primaryjoin='and_('
                                 'Aggregate.id == '
                                 'AggregateMetadata.aggregate_id,'
                                 'AggregateMetadata.deleted == 0,'
                                 'Aggregate.deleted == 0)')

    @property
    def _extra_keys(self):
        return ['hosts', 'metadetails', 'availability_zone']

    @property
    def hosts(self):
        return [h.host for h in self._hosts]

    @property
    def metadetails(self):
        return dict([(m.key, m.value) for m in self._metadata])

    @property
    def availability_zone(self):
        if 'availability_zone' not in self.metadetails:
            return None
        return self.metadetails['availability_zone']


class AgentBuild(BASE, NovaBase):
    """Represents an agent build."""
    __tablename__ = 'agent_builds'
    __table_args__ = (
        Index('agent_builds_hypervisor_os_arch_idx', 'hypervisor', 'os',
              'architecture'),
        schema.UniqueConstraint(
            "hypervisor", "os", "architecture", "deleted",
            name="uniq_agent_builds0hypervisor0os0architecture0deleted"),
    )
    id = Column(Integer, primary_key=True)
    hypervisor = Column(String(255))
    os = Column(String(255))
    architecture = Column(String(255))
    version = Column(String(255))
    url = Column(String(255))
    md5hash = Column(String(255))


class BandwidthUsage(BASE, NovaBase):
    """Cache for instance bandwidth usage data pulled from the hypervisor."""
    __tablename__ = 'bw_usage_cache'
    __table_args__ = (
        Index('bw_usage_cache_uuid_start_period_idx',
              'uuid', 'start_period'),
    )
    id = Column(Integer, primary_key=True, nullable=False)
    uuid = Column(String(36))
    mac = Column(String(255))
    start_period = Column(DateTime, nullable=False)
    last_refreshed = Column(DateTime)
    bw_in = Column(BigInteger)
    bw_out = Column(BigInteger)
    last_ctr_in = Column(BigInteger)
    last_ctr_out = Column(BigInteger)


class VolumeUsage(BASE, NovaBase):
    """Cache for volume usage data pulled from the hypervisor."""
    __tablename__ = 'volume_usage_cache'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False)
    volume_id = Column(String(36), nullable=False)
    instance_uuid = Column(String(36))
    project_id = Column(String(36))
    user_id = Column(String(36))
    availability_zone = Column(String(255))
    tot_last_refreshed = Column(DateTime)
    tot_reads = Column(BigInteger, default=0)
    tot_read_bytes = Column(BigInteger, default=0)
    tot_writes = Column(BigInteger, default=0)
    tot_write_bytes = Column(BigInteger, default=0)
    curr_last_refreshed = Column(DateTime)
    curr_reads = Column(BigInteger, default=0)
    curr_read_bytes = Column(BigInteger, default=0)
    curr_writes = Column(BigInteger, default=0)
    curr_write_bytes = Column(BigInteger, default=0)


class S3Image(BASE, NovaBase):
    """Compatibility layer for the S3 image service talking to Glance."""
    __tablename__ = 's3_images'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False,
                autoincrement=True)
    uuid = Column(String(36), nullable=False)


class VolumeIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 volume service."""
    __tablename__ = 'volume_id_mappings'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False,
                autoincrement=True)
    uuid = Column(String(36), nullable=False)


class SnapshotIdMapping(BASE, NovaBase):
    """Compatibility layer for the EC2 snapshot service."""
    __tablename__ = 'snapshot_id_mappings'
    __table_args__ = ()
    id = Column(Integer, primary_key=True, nullable=False,
                autoincrement=True)
    uuid = Column(String(36), nullable=False)


class InstanceFault(BASE, NovaBase):
    __tablename__ = 'instance_faults'
    __table_args__ = (
        Index('instance_faults_host_idx', 'host'),
        Index('instance_faults_instance_uuid_deleted_created_at_idx',
              'instance_uuid', 'deleted', 'created_at')
    )

    id = Column(Integer, primary_key=True, nullable=False)
    instance_uuid = Column(String(36), ForeignKey('instances.uuid'))
    code = Column(Integer(), nullable=False)
    message = Column(String(255))
    details = Column(MediumText())
    host = Column(String(255))


class InstanceAction(BASE, NovaBase):
    """Track client actions on an instance.

    The intention is that there will only be one of these per user request.
    A lookup by (instance_uuid, request_id) should always return a single
    result.
""" __tablename__ = 'instance_actions' __table_args__ = ( Index('instance_uuid_idx', 'instance_uuid'), Index('request_id_idx', 'request_id') ) id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) action = Column(String(255)) instance_uuid = Column(String(36), ForeignKey('instances.uuid')) request_id = Column(String(255)) user_id = Column(String(255)) project_id = Column(String(255)) start_time = Column(DateTime, default=timeutils.utcnow) finish_time = Column(DateTime) message = Column(String(255)) class InstanceActionEvent(BASE, NovaBase): """Track events that occur during an InstanceAction.""" __tablename__ = 'instance_actions_events' __table_args__ = () id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) event = Column(String(255)) action_id = Column(Integer, ForeignKey('instance_actions.id')) start_time = Column(DateTime, default=timeutils.utcnow) finish_time = Column(DateTime) result = Column(String(255)) traceback = Column(Text) host = Column(String(255)) details = Column(Text) class InstanceIdMapping(BASE, NovaBase): """Compatibility layer for the EC2 instance service.""" __tablename__ = 'instance_id_mappings' __table_args__ = ( Index('ix_instance_id_mappings_uuid', 'uuid'), ) id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) uuid = Column(String(36), nullable=False) class TaskLog(BASE, NovaBase): """Audit log for background periodic tasks.""" __tablename__ = 'task_log' __table_args__ = ( schema.UniqueConstraint( 'task_name', 'host', 'period_beginning', 'period_ending', name="uniq_task_log0task_name0host0period_beginning0period_ending" ), Index('ix_task_log_period_beginning', 'period_beginning'), Index('ix_task_log_host', 'host'), Index('ix_task_log_period_ending', 'period_ending'), ) id = Column(Integer, primary_key=True, nullable=False, autoincrement=True) task_name = Column(String(255), nullable=False) state = Column(String(255), nullable=False) host = Column(String(255), nullable=False) period_beginning = Column(DateTime, default=timeutils.utcnow, nullable=False) period_ending = Column(DateTime, default=timeutils.utcnow, nullable=False) message = Column(String(255), nullable=False) task_items = Column(Integer(), default=0) errors = Column(Integer(), default=0) class InstanceGroupMember(BASE, NovaBase): """Represents the members for an instance group.""" __tablename__ = 'instance_group_member' __table_args__ = ( Index('instance_group_member_instance_idx', 'instance_id'), ) id = Column(Integer, primary_key=True, nullable=False) instance_id = Column(String(255)) group_id = Column(Integer, ForeignKey('instance_groups.id'), nullable=False) class InstanceGroupPolicy(BASE, NovaBase): """Represents the policy type for an instance group.""" __tablename__ = 'instance_group_policy' __table_args__ = ( Index('instance_group_policy_policy_idx', 'policy'), ) id = Column(Integer, primary_key=True, nullable=False) policy = Column(String(255)) group_id = Column(Integer, ForeignKey('instance_groups.id'), nullable=False) class InstanceGroupMetadata(BASE, NovaBase): """Represents a key/value pair for an instance group.""" __tablename__ = 'instance_group_metadata' __table_args__ = ( Index('instance_group_metadata_key_idx', 'key'), ) id = Column(Integer, primary_key=True, nullable=False) key = Column(String(255)) value = Column(String(255)) group_id = Column(Integer, ForeignKey('instance_groups.id'), nullable=False) class InstanceGroup(BASE, NovaBase): """Represents an instance group. 
A group will maintain a collection of instances and the relationship between them. """ __tablename__ = 'instance_groups' __table_args__ = ( schema.UniqueConstraint("uuid", "deleted", name="uniq_instance_groups0uuid0deleted"), ) id = Column(Integer, primary_key=True, autoincrement=True) user_id = Column(String(255)) project_id = Column(String(255)) uuid = Column(String(36), nullable=False) name = Column(String(255)) _policies = relationship(InstanceGroupPolicy, primaryjoin='and_(' 'InstanceGroup.id == InstanceGroupPolicy.group_id,' 'InstanceGroupPolicy.deleted == 0,' 'InstanceGroup.deleted == 0)') _metadata = relationship(InstanceGroupMetadata, primaryjoin='and_(' 'InstanceGroup.id == InstanceGroupMetadata.group_id,' 'InstanceGroupMetadata.deleted == 0,' 'InstanceGroup.deleted == 0)') _members = relationship(InstanceGroupMember, primaryjoin='and_(' 'InstanceGroup.id == InstanceGroupMember.group_id,' 'InstanceGroupMember.deleted == 0,' 'InstanceGroup.deleted == 0)') @property def policies(self): return [p.policy for p in self._policies] @property def metadetails(self): return dict((m.key, m.value) for m in self._metadata) @property def members(self): return [m.instance_id for m in self._members] class PciDevice(BASE, NovaBase): """Represents a PCI host device that can be passed through to instances. """ __tablename__ = 'pci_devices' __table_args__ = ( Index('ix_pci_devices_compute_node_id_deleted', 'compute_node_id', 'deleted'), Index('ix_pci_devices_instance_uuid_deleted', 'instance_uuid', 'deleted'), schema.UniqueConstraint( "compute_node_id", "address", "deleted", name="uniq_pci_devices0compute_node_id0address0deleted") ) id = Column(Integer, primary_key=True) compute_node_id = Column(Integer, ForeignKey('compute_nodes.id'), nullable=False) # physical address of device domain:bus:slot.func (0000:09:01.1) address = Column(String(12), nullable=False) vendor_id = Column(String(4), nullable=False) product_id = Column(String(4), nullable=False) dev_type = Column(String(8), nullable=False) dev_id = Column(String(255)) # label is an abstract device name, used to unify devices with the # same functionality but different addresses or hosts. label = Column(String(255), nullable=False) status = Column(String(36), nullable=False) extra_info = Column(Text) instance_uuid = Column(String(36)) instance = relationship(Instance, backref="pci_devices", foreign_keys=instance_uuid, primaryjoin='and_(' 'PciDevice.instance_uuid == Instance.uuid,' 'PciDevice.deleted == 0)') nova-2014.1.5/nova/db/sqlalchemy/__init__.py0000664000567000056700000000162512540642532021661 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
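# NOTE(editor): the @compiles hook below renders BigInteger as plain INTEGER # on the sqlite dialect: sqlite only treats the literal INTEGER PRIMARY KEY # as an autoincrementing rowid alias, and its INTEGER affinity already holds # 64-bit values, so nothing is lost. A minimal sketch of the effect, with a # hypothetical table name: # # from sqlalchemy import BigInteger, Column, MetaData, Table, create_engine # t = Table('t', MetaData(), Column('id', BigInteger, primary_key=True)) # t.create(create_engine('sqlite://')) # DDL emits "id INTEGER", not BIGINT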
from sqlalchemy import BigInteger from sqlalchemy.ext.compiler import compiles @compiles(BigInteger, 'sqlite') def compile_big_int_sqlite(type_, compiler, **kw): return 'INTEGER' nova-2014.1.5/nova/db/sqlalchemy/api.py0000664000567000056700000066006212540642543020703 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import collections import copy import datetime import functools import sys import time import uuid from oslo.config import cfg import six from sqlalchemy import and_ from sqlalchemy import Boolean from sqlalchemy.exc import DataError from sqlalchemy.exc import IntegrityError from sqlalchemy.exc import NoSuchTableError from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import or_ from sqlalchemy.orm import contains_eager from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.orm import noload from sqlalchemy.schema import Table from sqlalchemy.sql.expression import asc from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import select from sqlalchemy.sql import func from sqlalchemy import String from nova import block_device from nova.compute import task_states from nova.compute import vm_states import nova.context from nova.db.sqlalchemy import models from nova import exception from nova.openstack.common.db import exception as db_exc from nova.openstack.common.db.sqlalchemy import session as db_session from nova.openstack.common.db.sqlalchemy import utils as sqlalchemyutils from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.openstack.common import uuidutils from nova import quota db_opts = [ cfg.StrOpt('osapi_compute_unique_server_name_scope', default='', help='When set, compute API will consider duplicate hostnames ' 'invalid within the specified scope, regardless of case. 
' 'Should be empty, "project" or "global".'), ] connection_opts = [ cfg.StrOpt('slave_connection', secret=True, help='The SQLAlchemy connection string used to connect to the ' 'slave database'), ] CONF = cfg.CONF CONF.register_opts(db_opts) CONF.register_opts(connection_opts, group='database') CONF.import_opt('compute_topic', 'nova.compute.rpcapi') CONF.import_opt('connection', 'nova.openstack.common.db.options', group='database') LOG = logging.getLogger(__name__) _MASTER_FACADE = None _SLAVE_FACADE = None def _create_facade_lazily(use_slave=False): global _MASTER_FACADE global _SLAVE_FACADE return_slave = use_slave and CONF.database.slave_connection if not return_slave: if _MASTER_FACADE is None: _MASTER_FACADE = db_session.EngineFacade( CONF.database.connection, **dict(CONF.database.iteritems()) ) return _MASTER_FACADE else: if _SLAVE_FACADE is None: _SLAVE_FACADE = db_session.EngineFacade( CONF.database.slave_connection, **dict(CONF.database.iteritems()) ) return _SLAVE_FACADE def get_engine(use_slave=False): facade = _create_facade_lazily(use_slave) return facade.get_engine() def get_session(use_slave=False, **kwargs): facade = _create_facade_lazily(use_slave) return facade.get_session(**kwargs) _SHADOW_TABLE_PREFIX = 'shadow_' _DEFAULT_QUOTA_NAME = 'default' PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks'] def get_backend(): """The backend is this module itself.""" return sys.modules[__name__] def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. """ @functools.wraps(f) def wrapper(*args, **kwargs): nova.context.require_admin_context(args[0]) return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`nova.context.authorize_project_context` and :py:func:`nova.context.authorize_user_context`. The first argument to the wrapped function must be the context. """ @functools.wraps(f) def wrapper(*args, **kwargs): nova.context.require_context(args[0]) return f(*args, **kwargs) return wrapper def require_instance_exists_using_uuid(f): """Decorator to require the specified instance to exist. Requires the wrapped function to use context and instance_uuid as their first two arguments. """ @functools.wraps(f) def wrapper(context, instance_uuid, *args, **kwargs): instance_get_by_uuid(context, instance_uuid) return f(context, instance_uuid, *args, **kwargs) return wrapper def require_aggregate_exists(f): """Decorator to require the specified aggregate to exist. Requires the wrapped function to use context and aggregate_id as their first two arguments. """ @functools.wraps(f) def wrapper(context, aggregate_id, *args, **kwargs): aggregate_get(context, aggregate_id) return f(context, aggregate_id, *args, **kwargs) return wrapper def _retry_on_deadlock(f): """Decorator to retry a DB API call if Deadlock was received.""" @functools.wraps(f) def wrapped(*args, **kwargs): while True: try: return f(*args, **kwargs) except db_exc.DBDeadlock: LOG.warn(_("Deadlock detected when running " "'%(func_name)s': Retrying..."), dict(func_name=f.__name__)) # Retry! time.sleep(0.5) continue functools.update_wrapper(wrapped, f) return wrapped def model_query(context, model, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. 
:param context: context to query under :param use_slave: If true, use slave_connection :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. If set to 'allow_none', restriction includes project_id = None. :param base_model: Where model_query is passed a "model" parameter which is not a subclass of NovaBase, we should pass an extra base_model parameter that is a subclass of NovaBase and corresponds to the model parameter. """ use_slave = kwargs.get('use_slave') or False if CONF.database.slave_connection == '': use_slave = False session = kwargs.get('session') or get_session(use_slave=use_slave) read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only', False) def issubclassof_nova_base(obj): return isinstance(obj, type) and issubclass(obj, models.NovaBase) base_model = model if not issubclassof_nova_base(base_model): base_model = kwargs.get('base_model', None) if not issubclassof_nova_base(base_model): raise Exception(_("model or base_model parameter should be " "subclass of NovaBase")) query = session.query(model, *args) default_deleted_value = base_model.__mapper__.c.deleted.default.arg if read_deleted == 'no': query = query.filter(base_model.deleted == default_deleted_value) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter(base_model.deleted != default_deleted_value) else: raise Exception(_("Unrecognized read_deleted value '%s'") % read_deleted) if nova.context.is_user_context(context) and project_only: if project_only == 'allow_none': query = query.\ filter(or_(base_model.project_id == context.project_id, base_model.project_id == None)) else: query = query.filter_by(project_id=context.project_id) return query def exact_filter(query, model, filters, legal_keys): """Applies exact match filtering to a query. Returns the updated query. Modifies filters argument to remove filters consumed. :param query: query to apply filters to :param model: model object the query applies to, for IN-style filtering :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :param legal_keys: list of keys to apply exact filtering to """ filter_dict = {} # Walk through all the keys for key in legal_keys: # Skip ones we're not filtering on if key not in filters: continue # OK, filtering on this key; what value do we search for? 
value = filters.pop(key) if key in ('metadata', 'system_metadata'): column_attr = getattr(model, key) if isinstance(value, list): for item in value: for k, v in item.iteritems(): query = query.filter(column_attr.any(key=k)) query = query.filter(column_attr.any(value=v)) else: for k, v in value.iteritems(): query = query.filter(column_attr.any(key=k)) query = query.filter(column_attr.any(value=v)) elif isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(model, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: query = query.filter_by(**filter_dict) return query def convert_objects_related_datetimes(values, *datetime_keys): for key in datetime_keys: if key in values and values[key]: if isinstance(values[key], six.string_types): values[key] = timeutils.parse_strtime(values[key]) # NOTE(danms): Strip UTC timezones from datetimes, since they're # stored that way in the database values[key] = values[key].replace(tzinfo=None) return values def _sync_instances(context, project_id, user_id, session): return dict(zip(('instances', 'cores', 'ram'), _instance_data_get_for_user( context, project_id, user_id, session))) def _sync_floating_ips(context, project_id, user_id, session): return dict(floating_ips=_floating_ip_count_by_project( context, project_id, session)) def _sync_fixed_ips(context, project_id, user_id, session): return dict(fixed_ips=_fixed_ip_count_by_project( context, project_id, session)) def _sync_security_groups(context, project_id, user_id, session): return dict(security_groups=_security_group_count_by_project_and_user( context, project_id, user_id, session)) QUOTA_SYNC_FUNCTIONS = { '_sync_instances': _sync_instances, '_sync_floating_ips': _sync_floating_ips, '_sync_fixed_ips': _sync_fixed_ips, '_sync_security_groups': _sync_security_groups, } ################### def constraint(**conditions): return Constraint(conditions) def equal_any(*values): return EqualityCondition(values) def not_equal(*values): return InequalityCondition(values) class Constraint(object): def __init__(self, conditions): self.conditions = conditions def apply(self, model, query): for key, condition in self.conditions.iteritems(): for clause in condition.clauses(getattr(model, key)): query = query.filter(clause) return query class EqualityCondition(object): def __init__(self, values): self.values = values def clauses(self, field): # method signature requires us to return an iterable even if for OR # operator this will actually be a single clause return [or_(*[field == value for value in self.values])] class InequalityCondition(object): def __init__(self, values): self.values = values def clauses(self, field): return [field != value for value in self.values] ################### @require_admin_context def service_destroy(context, service_id): session = get_session() with session.begin(): count = model_query(context, models.Service, session=session).\ filter_by(id=service_id).\ soft_delete(synchronize_session=False) if count == 0: raise exception.ServiceNotFound(service_id=service_id) model_query(context, models.ComputeNode, session=session).\ filter_by(service_id=service_id).\ soft_delete(synchronize_session=False) def _service_get(context, service_id, with_compute_node=True, session=None): query = model_query(context, models.Service, session=session).\ filter_by(id=service_id) if with_compute_node: query = 
query.options(joinedload('compute_node')) result = query.first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def service_get(context, service_id): return _service_get(context, service_id) @require_admin_context def service_get_all(context, disabled=None): query = model_query(context, models.Service) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_all_by_topic(context, topic): return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(topic=topic).\ all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): return model_query(context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() @require_admin_context def service_get_all_by_host(context, host): return model_query(context, models.Service, read_deleted="no").\ filter_by(host=host).\ all() @require_admin_context def service_get_by_compute_host(context, host): result = model_query(context, models.Service, read_deleted="no").\ options(joinedload('compute_node')).\ filter_by(host=host).\ filter_by(topic=CONF.compute_topic).\ first() if not result: raise exception.ComputeHostNotFound(host=host) return result @require_admin_context def service_get_by_args(context, host, binary): result = model_query(context, models.Service).\ filter_by(host=host).\ filter_by(binary=binary).\ first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) return result @require_admin_context def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not CONF.enable_new_services: service_ref.disabled = True try: service_ref.save() except db_exc.DBDuplicateEntry as e: if 'binary' in e.columns: raise exception.ServiceBinaryExists(host=values.get('host'), binary=values.get('binary')) raise exception.ServiceTopicExists(host=values.get('host'), topic=values.get('topic')) return service_ref @require_admin_context @_retry_on_deadlock def service_update(context, service_id, values): session = get_session() with session.begin(): service_ref = _service_get(context, service_id, with_compute_node=False, session=session) service_ref.update(values) return service_ref ################### def compute_node_get(context, compute_id): return _compute_node_get(context, compute_id) def _compute_node_get(context, compute_id, session=None): result = model_query(context, models.ComputeNode, session=session).\ filter_by(id=compute_id).\ options(joinedload('service')).\ first() if not result: raise exception.ComputeHostNotFound(host=compute_id) return result @require_admin_context def compute_node_get_by_service_id(context, service_id): result = model_query(context, models.ComputeNode, read_deleted='no').\ filter_by(service_id=service_id).\ first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def compute_node_get_all(context, no_date_fields): # NOTE(msdubov): Using lower-level 'select' queries and joining the tables # manually here allows to gain 3x speed-up and to have 5x # less network load / memory usage compared to the sqla ORM. 
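# NOTE(editor): a sketch, with made-up values, of the shape assembled by # hand below; it is equivalent in content to what an ORM # joinedload('service') would have produced: # # [{'id': 1, 'service_id': 3, 'hypervisor_hostname': 'node1', ..., # 'service': {'id': 3, 'host': 'node1', 'binary': 'nova-compute', ...}}]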
engine = get_engine() # Retrieve ComputeNode, Service compute_node = models.ComputeNode.__table__ service = models.Service.__table__ with engine.begin() as conn: redundant_columns = set(['deleted_at', 'created_at', 'updated_at', 'deleted']) if no_date_fields else set([]) def filter_columns(table): return [c for c in table.c if c.name not in redundant_columns] compute_node_query = select(filter_columns(compute_node)).\ where(compute_node.c.deleted == 0).\ order_by(compute_node.c.service_id) compute_node_rows = conn.execute(compute_node_query).fetchall() service_query = select(filter_columns(service)).\ where((service.c.deleted == 0) & (service.c.binary == 'nova-compute')).\ order_by(service.c.id) service_rows = conn.execute(service_query).fetchall() # Join ComputeNode & Service manually. services = {} for proxy in service_rows: services[proxy['id']] = dict(proxy.items()) compute_nodes = [] for proxy in compute_node_rows: node = dict(proxy.items()) node['service'] = services.get(proxy['service_id']) compute_nodes.append(node) return compute_nodes @require_admin_context def compute_node_search_by_hypervisor(context, hypervisor_match): field = models.ComputeNode.hypervisor_hostname return model_query(context, models.ComputeNode).\ options(joinedload('service')).\ filter(field.like('%%%s%%' % hypervisor_match)).\ all() @require_admin_context def compute_node_create(context, values): """Creates a new ComputeNode and populates the capacity fields with the most recent data. """ datetime_keys = ('created_at', 'deleted_at', 'updated_at') convert_objects_related_datetimes(values, *datetime_keys) compute_node_ref = models.ComputeNode() compute_node_ref.update(values) compute_node_ref.save() return compute_node_ref @require_admin_context @_retry_on_deadlock def compute_node_update(context, compute_id, values): """Updates the ComputeNode record with the most recent data.""" session = get_session() with session.begin(): compute_ref = _compute_node_get(context, compute_id, session=session) # Always update this, even if there's going to be no other # changes in data. This ensures that we invalidate the # scheduler cache of compute node data in case of races. 
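# (Without the explicit timestamp, an update whose values match the # current row would be skipped by SQLAlchemy as a no-op and updated_at # would not move, so readers could not tell the record was refreshed.)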
values['updated_at'] = timeutils.utcnow() datetime_keys = ('created_at', 'deleted_at', 'updated_at') convert_objects_related_datetimes(values, *datetime_keys) compute_ref.update(values) return compute_ref @require_admin_context def compute_node_delete(context, compute_id): """Delete a ComputeNode record.""" session = get_session() with session.begin(): result = model_query(context, models.ComputeNode, session=session).\ filter_by(id=compute_id).\ soft_delete(synchronize_session=False) if not result: raise exception.ComputeHostNotFound(host=compute_id) def compute_node_statistics(context): """Compute statistics over all compute nodes.""" result = model_query(context, func.count(models.ComputeNode.id), func.sum(models.ComputeNode.vcpus), func.sum(models.ComputeNode.memory_mb), func.sum(models.ComputeNode.local_gb), func.sum(models.ComputeNode.vcpus_used), func.sum(models.ComputeNode.memory_mb_used), func.sum(models.ComputeNode.local_gb_used), func.sum(models.ComputeNode.free_ram_mb), func.sum(models.ComputeNode.free_disk_gb), func.sum(models.ComputeNode.current_workload), func.sum(models.ComputeNode.running_vms), func.sum(models.ComputeNode.disk_available_least), base_model=models.ComputeNode, read_deleted="no").first() # Build a dict of the info--making no assumptions about result fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb', 'current_workload', 'running_vms', 'disk_available_least') return dict((field, int(result[idx] or 0)) for idx, field in enumerate(fields)) ################### @require_admin_context def certificate_create(context, values): certificate_ref = models.Certificate() for (key, value) in values.iteritems(): certificate_ref[key] = value certificate_ref.save() return certificate_ref @require_admin_context def certificate_get_all_by_project(context, project_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_admin_context def certificate_get_all_by_user(context, user_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(user_id=user_id).\ all() @require_admin_context def certificate_get_all_by_user_and_project(context, user_id, project_id): return model_query(context, models.Certificate, read_deleted="no").\ filter_by(user_id=user_id).\ filter_by(project_id=project_id).\ all() ################### @require_context def floating_ip_get(context, id): try: result = model_query(context, models.FloatingIp, project_only=True).\ filter_by(id=id).\ options(joinedload_all('fixed_ip.instance')).\ first() if not result: raise exception.FloatingIpNotFound(id=id) except DataError: msg = _("Invalid floating ip id %s in request") % id LOG.warn(msg) raise exception.InvalidID(id=id) return result @require_context def floating_ip_get_pools(context): pools = [] for result in model_query(context, models.FloatingIp.pool, base_model=models.FloatingIp).distinct(): pools.append({'name': result[0]}) return pools @require_context def floating_ip_allocate_address(context, project_id, pool, auto_assigned=False): nova.context.authorize_project_context(context, project_id) session = get_session() with session.begin(): floating_ip_ref = model_query(context, models.FloatingIp, session=session, read_deleted="no").\ filter_by(fixed_ip_id=None).\ filter_by(project_id=None).\ filter_by(pool=pool).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if 
not floating_ip_ref: raise exception.NoMoreFloatingIps() floating_ip_ref['project_id'] = project_id floating_ip_ref['auto_assigned'] = auto_assigned session.add(floating_ip_ref) return floating_ip_ref['address'] @require_context def floating_ip_bulk_create(context, ips): session = get_session() with session.begin(): for ip in ips: model = models.FloatingIp() model.update(ip) try: # NOTE(boris-42): To get existing address we have to do each # time session.flush().. session.add(model) session.flush() except db_exc.DBDuplicateEntry: raise exception.FloatingIpExists(address=ip['address']) def _ip_range_splitter(ips, block_size=256): """Yields blocks of IPs no more than block_size elements long.""" out = [] count = 0 for ip in ips: out.append(ip['address']) count += 1 if count > block_size - 1: yield out out = [] count = 0 if out: yield out @require_context def floating_ip_bulk_destroy(context, ips): session = get_session() with session.begin(): project_id_to_quota_count = collections.defaultdict(int) for ip_block in _ip_range_splitter(ips): # Find any floating IPs that were not auto_assigned and # thus need quota released. query = model_query(context, models.FloatingIp).\ filter(models.FloatingIp.address.in_(ip_block)).\ filter_by(auto_assigned=False) rows = query.all() for row in rows: # The count is negative since we release quota by # reserving negative quota. project_id_to_quota_count[row['project_id']] -= 1 # Delete the floating IPs. model_query(context, models.FloatingIp).\ filter(models.FloatingIp.address.in_(ip_block)).\ soft_delete(synchronize_session='fetch') # Delete the quotas, if needed. for project_id, count in project_id_to_quota_count.iteritems(): try: reservations = quota.QUOTAS.reserve(context, project_id=project_id, floating_ips=count) quota.QUOTAS.commit(context, reservations, project_id=project_id) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_("Failed to update usages bulk " "deallocating floating IP")) @require_context def floating_ip_create(context, values): floating_ip_ref = models.FloatingIp() floating_ip_ref.update(values) try: floating_ip_ref.save() except db_exc.DBDuplicateEntry: raise exception.FloatingIpExists(address=values['address']) return floating_ip_ref def _floating_ip_count_by_project(context, project_id, session=None): nova.context.authorize_project_context(context, project_id) # TODO(tr3buchet): why leave auto_assigned floating IPs out? 
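# NOTE(editor): this count feeds the _sync_floating_ips() quota-sync helper # above, so leaving auto_assigned rows out means auto-assigned addresses are # not charged against the project's floating IP quota; floating_ip_bulk_destroy # above is consistent with this, releasing quota only for non-auto-assigned IPs.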
return model_query(context, models.FloatingIp, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ count() @require_context @_retry_on_deadlock def floating_ip_fixed_ip_associate(context, floating_address, fixed_address, host): session = get_session() with session.begin(): floating_ip_ref = _floating_ip_get_by_address(context, floating_address, session=session) fixed_ip_ref = model_query(context, models.FixedIp, session=session).\ filter_by(address=fixed_address).\ options(joinedload('network')).\ first() if floating_ip_ref.fixed_ip_id == fixed_ip_ref["id"]: return None floating_ip_ref.fixed_ip_id = fixed_ip_ref["id"] floating_ip_ref.host = host return fixed_ip_ref @require_context def floating_ip_deallocate(context, address): model_query(context, models.FloatingIp).\ filter_by(address=address).\ update({'project_id': None, 'host': None, 'auto_assigned': False}) @require_context def floating_ip_destroy(context, address): model_query(context, models.FloatingIp).\ filter_by(address=address).\ delete() @require_context def floating_ip_disassociate(context, address): session = get_session() with session.begin(): floating_ip_ref = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ first() if not floating_ip_ref: raise exception.FloatingIpNotFoundForAddress(address=address) fixed_ip_ref = model_query(context, models.FixedIp, session=session).\ filter_by(id=floating_ip_ref['fixed_ip_id']).\ options(joinedload('network')).\ first() floating_ip_ref.fixed_ip_id = None floating_ip_ref.host = None return fixed_ip_ref @require_context def floating_ip_set_auto_assigned(context, address): model_query(context, models.FloatingIp).\ filter_by(address=address).\ update({'auto_assigned': True}) def _floating_ip_get_all(context, session=None): return model_query(context, models.FloatingIp, read_deleted="no", session=session) @require_admin_context def floating_ip_get_all(context): floating_ip_refs = _floating_ip_get_all(context).all() if not floating_ip_refs: raise exception.NoFloatingIpsDefined() return floating_ip_refs @require_admin_context def floating_ip_get_all_by_host(context, host): floating_ip_refs = _floating_ip_get_all(context).\ filter_by(host=host).\ all() if not floating_ip_refs: raise exception.FloatingIpNotFoundForHost(host=host) return floating_ip_refs @require_context def floating_ip_get_all_by_project(context, project_id): nova.context.authorize_project_context(context, project_id) # TODO(tr3buchet): why do we not want auto_assigned floating IPs here? return _floating_ip_get_all(context).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ options(joinedload_all('fixed_ip.instance')).\ all() @require_context def floating_ip_get_by_address(context, address): return _floating_ip_get_by_address(context, address) def _floating_ip_get_by_address(context, address, session=None): # if address string is empty explicitly set it to None if not address: address = None try: result = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ options(joinedload_all('fixed_ip.instance')).\ first() if not result: raise exception.FloatingIpNotFoundForAddress(address=address) except DataError: msg = _("Invalid floating IP %s in request") % address LOG.warn(msg) raise exception.InvalidIpAddressError(msg) # If the floating IP has a project ID set, check to make sure # the non-admin user has access. 
if result.project_id and nova.context.is_user_context(context): nova.context.authorize_project_context(context, result.project_id) return result @require_context def floating_ip_get_by_fixed_address(context, fixed_address): return model_query(context, models.FloatingIp).\ outerjoin(models.FixedIp, models.FixedIp.id == models.FloatingIp.fixed_ip_id).\ filter(models.FixedIp.address == fixed_address).\ all() @require_context def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): return model_query(context, models.FloatingIp).\ filter_by(fixed_ip_id=fixed_ip_id).\ all() @require_context def floating_ip_update(context, address, values): session = get_session() with session.begin(): float_ip_ref = _floating_ip_get_by_address(context, address, session) float_ip_ref.update(values) try: float_ip_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.FloatingIpExists(address=values['address']) return float_ip_ref def _dnsdomain_get(context, session, fqdomain): return model_query(context, models.DNSDomain, session=session, read_deleted="no").\ filter_by(domain=fqdomain).\ with_lockmode('update').\ first() @require_context def dnsdomain_get(context, fqdomain): session = get_session() with session.begin(): return _dnsdomain_get(context, session, fqdomain) def _dnsdomain_get_or_create(context, session, fqdomain): domain_ref = _dnsdomain_get(context, session, fqdomain) if not domain_ref: dns_ref = models.DNSDomain() dns_ref.update({'domain': fqdomain, 'availability_zone': None, 'project_id': None}) return dns_ref return domain_ref @require_admin_context def dnsdomain_register_for_zone(context, fqdomain, zone): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'private' domain_ref.availability_zone = zone session.add(domain_ref) @require_admin_context def dnsdomain_register_for_project(context, fqdomain, project): session = get_session() with session.begin(): domain_ref = _dnsdomain_get_or_create(context, session, fqdomain) domain_ref.scope = 'public' domain_ref.project_id = project session.add(domain_ref) @require_admin_context def dnsdomain_unregister(context, fqdomain): model_query(context, models.DNSDomain).\ filter_by(domain=fqdomain).\ delete() @require_context def dnsdomain_list(context): query = model_query(context, models.DNSDomain, read_deleted="no") return [row.domain for row in query.all()] def dnsdomain_get_all(context): return model_query(context, models.DNSDomain, read_deleted="no").all() ################### @require_admin_context def fixed_ip_associate(context, address, instance_uuid, network_id=None, reserved=False): """Keyword arguments: reserved -- should be a boolean value(True or False), exact value will be used to filter on the fixed ip address """ if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=reserved).\ filter_by(address=address).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if fixed_ip_ref is None: raise exception.FixedIpNotFoundForNetwork(address=address, network_uuid=network_id) if fixed_ip_ref.instance_uuid: raise exception.FixedIpAlreadyInUse(address=address, 
instance_uuid=instance_uuid) if not fixed_ip_ref.network_id: fixed_ip_ref.network_id = network_id fixed_ip_ref.instance_uuid = instance_uuid session.add(fixed_ip_ref) return fixed_ip_ref @require_admin_context def fixed_ip_associate_pool(context, network_id, instance_uuid=None, host=None): if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) session = get_session() with session.begin(): network_or_none = or_(models.FixedIp.network_id == network_id, models.FixedIp.network_id == None) fixed_ip_ref = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter(network_or_none).\ filter_by(reserved=False).\ filter_by(instance_uuid=None).\ filter_by(host=None).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: raise exception.NoMoreFixedIps() if fixed_ip_ref['network_id'] is None: fixed_ip_ref['network_id'] = network_id if instance_uuid: fixed_ip_ref['instance_uuid'] = instance_uuid if host: fixed_ip_ref['host'] = host session.add(fixed_ip_ref) return fixed_ip_ref @require_context def fixed_ip_create(context, values): fixed_ip_ref = models.FixedIp() fixed_ip_ref.update(values) try: fixed_ip_ref.save() except db_exc.DBDuplicateEntry: raise exception.FixedIpExists(address=values['address']) return fixed_ip_ref @require_context def fixed_ip_bulk_create(context, ips): session = get_session() with session.begin(): for ip in ips: model = models.FixedIp() model.update(ip) try: # NOTE (vsergeyev): To get existing address we have to do each # time session.flush(). # See related note at line 697. session.add(model) session.flush() except db_exc.DBDuplicateEntry: raise exception.FixedIpExists(address=ip['address']) @require_context def fixed_ip_disassociate(context, address): session = get_session() with session.begin(): _fixed_ip_get_by_address(context, address, session=session).\ update({'instance_uuid': None}) @require_admin_context def fixed_ip_disassociate_all_by_timeout(context, host, time): session = get_session() # NOTE(vish): only update fixed ips that "belong" to this # host; i.e. the network host or the instance # host matches. Two queries necessary because # join with update doesn't work. with session.begin(): host_filter = or_(and_(models.Instance.host == host, models.Network.multi_host == True), models.Network.host == host) result = model_query(context, models.FixedIp.id, base_model=models.FixedIp, read_deleted="no", session=session).\ filter(models.FixedIp.allocated == False).\ filter(models.FixedIp.updated_at < time).\ join((models.Network, models.Network.id == models.FixedIp.network_id)).\ join((models.Instance, models.Instance.uuid == models.FixedIp.instance_uuid)).\ filter(host_filter).\ all() fixed_ip_ids = [fip[0] for fip in result] if not fixed_ip_ids: return 0 result = model_query(context, models.FixedIp, session=session).\ filter(models.FixedIp.id.in_(fixed_ip_ids)).\ update({'instance_uuid': None, 'leased': False, 'updated_at': timeutils.utcnow()}, synchronize_session='fetch') return result @require_context def fixed_ip_get(context, id, get_network=False): query = model_query(context, models.FixedIp).filter_by(id=id) if get_network: query = query.options(joinedload('network')) result = query.first() if not result: raise exception.FixedIpNotFound(id=id) # FIXME(sirp): shouldn't we just use project_only here to restrict the # results?
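# NOTE(editor): project_only on model_query (see its docstring above) # filters on the model's own project_id column, which FixedIp does not # have; hence the manual authorization below that goes through the owning # instance's project.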
if (nova.context.is_user_context(context) and result['instance_uuid'] is not None): instance = instance_get_by_uuid(context.elevated(read_deleted='yes'), result['instance_uuid']) nova.context.authorize_project_context(context, instance.project_id) return result @require_admin_context def fixed_ip_get_all(context): result = model_query(context, models.FixedIp, read_deleted="yes").all() if not result: raise exception.NoFixedIpsDefined() return result @require_context def fixed_ip_get_by_address(context, address, columns_to_join=None): return _fixed_ip_get_by_address(context, address, columns_to_join=columns_to_join) def _fixed_ip_get_by_address(context, address, session=None, columns_to_join=None): if session is None: session = get_session() if columns_to_join is None: columns_to_join = [] with session.begin(subtransactions=True): try: result = model_query(context, models.FixedIp, session=session) for column in columns_to_join: result = result.options(joinedload_all(column)) result = result.filter_by(address=address).first() if not result: raise exception.FixedIpNotFoundForAddress(address=address) except DataError: msg = _("Invalid fixed IP Address %s in request") % address LOG.warn(msg) raise exception.FixedIpInvalid(msg) # NOTE(sirp): shouldn't we just use project_only here to restrict the # results? if (nova.context.is_user_context(context) and result['instance_uuid'] is not None): instance = _instance_get_by_uuid( context.elevated(read_deleted='yes'), result['instance_uuid'], session ) nova.context.authorize_project_context(context, instance.project_id) return result @require_admin_context def fixed_ip_get_by_address_detailed(context, address): """:returns: a tuple of (models.FixedIp, models.Network, models.Instance) """ try: result = model_query(context, models.FixedIp, models.Network, models.Instance).\ filter_by(address=address).\ outerjoin((models.Network, models.Network.id == models.FixedIp.network_id)).\ outerjoin((models.Instance, models.Instance.uuid == models.FixedIp.instance_uuid)).\ first() if not result: raise exception.FixedIpNotFoundForAddress(address=address) except DataError: msg = _("Invalid fixed IP Address %s in request") % address LOG.warn(msg) raise exception.FixedIpInvalid(msg) return result @require_context def fixed_ip_get_by_floating_address(context, floating_address): return model_query(context, models.FixedIp).\ outerjoin(models.FloatingIp, models.FloatingIp.fixed_ip_id == models.FixedIp.id).\ filter(models.FloatingIp.address == floating_address).\ first() # NOTE(tr3buchet) please don't invent an exception here, empty list is fine @require_context def fixed_ip_get_by_instance(context, instance_uuid): if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(instance_uuid=instance_uuid).\ all() if not result: raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid) return result @require_admin_context def fixed_ip_get_by_host(context, host): session = get_session() with session.begin(): instance_uuids = _instance_get_all_uuids_by_host(context, host, session=session) if not instance_uuids: return [] return model_query(context, models.FixedIp, session=session).\ filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\ all() @require_context def fixed_ip_get_by_network_host(context, network_id, host): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id).\ filter_by(host=host).\ first() if 
not result: raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id, host=host) return result @require_context def fixed_ips_by_virtual_interface(context, vif_id): result = model_query(context, models.FixedIp, read_deleted="no").\ filter_by(virtual_interface_id=vif_id).\ all() return result @require_context def fixed_ip_update(context, address, values): session = get_session() with session.begin(): _fixed_ip_get_by_address(context, address, session=session).\ update(values) def _fixed_ip_count_by_project(context, project_id, session=None): nova.context.authorize_project_context(context, project_id) return model_query(context, models.FixedIp.id, base_model=models.FixedIp, read_deleted="no", session=session).\ join((models.Instance, models.Instance.uuid == models.FixedIp.instance_uuid)).\ filter(models.Instance.project_id == project_id).\ count() ################### @require_context def virtual_interface_create(context, values): """Create a new virtual interface record in the database. :param values: = dict containing column values """ try: vif_ref = models.VirtualInterface() vif_ref.update(values) vif_ref.save() except db_exc.DBError: raise exception.VirtualInterfaceCreateException() return vif_ref def _virtual_interface_query(context, session=None, use_slave=False): return model_query(context, models.VirtualInterface, session=session, read_deleted="no", use_slave=use_slave) @require_context def virtual_interface_get(context, vif_id): """Gets a virtual interface from the table. :param vif_id: = id of the virtual interface """ vif_ref = _virtual_interface_query(context).\ filter_by(id=vif_id).\ first() return vif_ref @require_context def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table. :param address: = the address of the interface you're looking to get """ try: vif_ref = _virtual_interface_query(context).\ filter_by(address=address).\ first() except DataError: msg = _("Invalid virtual interface address %s in request") % address LOG.warn(msg) raise exception.InvalidIpAddressError(msg) return vif_ref @require_context def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table. :param vif_uuid: the uuid of the interface you're looking to get """ vif_ref = _virtual_interface_query(context).\ filter_by(uuid=vif_uuid).\ first() return vif_ref @require_context @require_instance_exists_using_uuid def virtual_interface_get_by_instance(context, instance_uuid, use_slave=False): """Gets all virtual interfaces for instance. :param instance_uuid: = uuid of the instance to retrieve vifs for """ vif_refs = _virtual_interface_query(context, use_slave=use_slave).\ filter_by(instance_uuid=instance_uuid).\ all() return vif_refs @require_context def virtual_interface_get_by_instance_and_network(context, instance_uuid, network_id): """Gets virtual interface for instance that's associated with network.""" vif_ref = _virtual_interface_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(network_id=network_id).\ first() return vif_ref @require_context def virtual_interface_delete_by_instance(context, instance_uuid): """Delete virtual interface records that are associated with the instance given by instance_uuid.
:param instance_uuid: = uuid of instance """ _virtual_interface_query(context).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() @require_context def virtual_interface_get_all(context): """Get all vifs.""" vif_refs = _virtual_interface_query(context).all() return vif_refs ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.iteritems(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs def _validate_unique_server_name(context, session, name): if not CONF.osapi_compute_unique_server_name_scope: return lowername = name.lower() base_query = model_query(context, models.Instance, session=session, read_deleted=False).\ filter(func.lower(models.Instance.hostname) == lowername) if CONF.osapi_compute_unique_server_name_scope == 'project': instance_with_same_name = base_query.\ filter_by(project_id=context.project_id).\ count() elif CONF.osapi_compute_unique_server_name_scope == 'global': instance_with_same_name = base_query.count() else: msg = _('Unknown osapi_compute_unique_server_name_scope value: %s' ' Flag must be empty, "global" or' ' "project"') % CONF.osapi_compute_unique_server_name_scope LOG.warn(msg) return if instance_with_same_name > 0: raise exception.InstanceExists(name=lowername) def _handle_objects_related_type_conversions(values): """Make sure that certain things in values (which may have come from an objects.instance.Instance object) are in suitable form for the database. """ # NOTE(danms): Make sure IP addresses are passed as strings to # the database engine for key in ('access_ip_v4', 'access_ip_v6'): if key in values and values[key] is not None: values[key] = str(values[key]) datetime_keys = ('created_at', 'deleted_at', 'updated_at', 'launched_at', 'terminated_at', 'scheduled_at') convert_objects_related_datetimes(values, *datetime_keys) @require_context def instance_create(context, values): """Create a new Instance record in the database. context - request context object values - dict containing column values. """ # NOTE(rpodolyaka): create the default security group, if it doesn't exist. 
# This must be done in a separate transaction, so that this one is not # aborted in case a concurrent one succeeds first and the unique constraint # for security group names is violated by a concurrent INSERT security_group_ensure_default(context) values = values.copy() values['metadata'] = _metadata_refs( values.get('metadata'), models.InstanceMetadata) values['system_metadata'] = _metadata_refs( values.get('system_metadata'), models.InstanceSystemMetadata) _handle_objects_related_type_conversions(values) instance_ref = models.Instance() if not values.get('uuid'): values['uuid'] = str(uuid.uuid4()) instance_ref['info_cache'] = models.InstanceInfoCache() info_cache = values.pop('info_cache', None) if info_cache is not None: instance_ref['info_cache'].update(info_cache) security_groups = values.pop('security_groups', []) instance_ref.update(values) def _get_sec_group_models(session, security_groups): models = [] default_group = _security_group_ensure_default(context, session) if 'default' in security_groups: models.append(default_group) # Generate a new list, so we don't modify the original security_groups = [x for x in security_groups if x != 'default'] if security_groups: models.extend(_security_group_get_by_names(context, session, context.project_id, security_groups)) return models session = get_session() with session.begin(): if 'hostname' in values: _validate_unique_server_name(context, session, values['hostname']) instance_ref.security_groups = _get_sec_group_models(session, security_groups) session.add(instance_ref) # create the instance uuid to ec2_id mapping entry for instance ec2_instance_create(context, instance_ref['uuid']) return instance_ref def _instance_data_get_for_user(context, project_id, user_id, session=None): result = model_query(context, func.count(models.Instance.id), func.sum(models.Instance.vcpus), func.sum(models.Instance.memory_mb), base_model=models.Instance, session=session).\ filter_by(project_id=project_id) if user_id: result = result.filter_by(user_id=user_id).first() else: result = result.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0, result[2] or 0) @require_context def instance_destroy(context, instance_uuid, constraint=None): session = get_session() with session.begin(): if uuidutils.is_uuid_like(instance_uuid): instance_ref = _instance_get_by_uuid(context, instance_uuid, session=session) else: raise exception.InvalidUUID(instance_uuid) query = model_query(context, models.Instance, session=session).\ filter_by(uuid=instance_uuid) if constraint is not None: query = constraint.apply(models.Instance, query) count = query.soft_delete() if count == 0: raise exception.ConstraintNotMet() model_query(context, models.SecurityGroupInstanceAssociation, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceInfoCache, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceMetadata, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() model_query(context, models.InstanceFault, session=session).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() return instance_ref @require_context def instance_get_by_uuid(context, uuid, columns_to_join=None, use_slave=False): return _instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join, use_slave=use_slave) def _instance_get_by_uuid(context, uuid, session=None, columns_to_join=None, use_slave=False): result = _build_instance_get(context, 
session=session, columns_to_join=columns_to_join, use_slave=use_slave).\ filter_by(uuid=uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=uuid) return result @require_context def instance_get(context, instance_id, columns_to_join=None): try: result = _build_instance_get(context, columns_to_join=columns_to_join ).filter_by(id=instance_id).first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result except DataError: # NOTE(sdague): catch all in case the db engine chokes on the # id because it's too long of an int to store. msg = _("Invalid instance id %s in request") % instance_id LOG.warn(msg) raise exception.InvalidID(id=instance_id) def _build_instance_get(context, session=None, columns_to_join=None, use_slave=False): query = model_query(context, models.Instance, session=session, project_only=True, use_slave=use_slave).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')) if columns_to_join is None: columns_to_join = ['metadata', 'system_metadata'] for column in columns_to_join: if column in ['info_cache', 'security_groups']: # Already always joined above continue query = query.options(joinedload(column)) #NOTE(alaski) Stop lazy loading of columns not needed. for col in ['metadata', 'system_metadata']: if col not in columns_to_join: query = query.options(noload(col)) return query def _instances_fill_metadata(context, instances, manual_joins=None, use_slave=False): """Selectively fill instances with manually-joined metadata. Note that instance will be converted to a dict. :param context: security context :param instances: list of instances to fill :param manual_joins: list of tables to manually join (can be any combination of 'metadata' and 'system_metadata' or None to take the default of both) """ uuids = [inst['uuid'] for inst in instances] if manual_joins is None: manual_joins = ['metadata', 'system_metadata'] meta = collections.defaultdict(list) if 'metadata' in manual_joins: for row in _instance_metadata_get_multi(context, uuids, use_slave=use_slave): meta[row['instance_uuid']].append(row) sys_meta = collections.defaultdict(list) if 'system_metadata' in manual_joins: for row in _instance_system_metadata_get_multi(context, uuids, use_slave=use_slave): sys_meta[row['instance_uuid']].append(row) pcidevs = collections.defaultdict(list) if 'pci_devices' in manual_joins: for row in _instance_pcidevs_get_multi(context, uuids): pcidevs[row['instance_uuid']].append(row) filled_instances = [] for inst in instances: inst = dict(inst.iteritems()) inst['system_metadata'] = sys_meta[inst['uuid']] inst['metadata'] = meta[inst['uuid']] if 'pci_devices' in manual_joins: inst['pci_devices'] = pcidevs[inst['uuid']] filled_instances.append(inst) return filled_instances def _manual_join_columns(columns_to_join): manual_joins = [] for column in ('metadata', 'system_metadata', 'pci_devices'): if column in columns_to_join: columns_to_join.remove(column) manual_joins.append(column) return manual_joins, columns_to_join @require_context def instance_get_all(context, columns_to_join=None): if columns_to_join is None: columns_to_join = ['info_cache', 'security_groups'] manual_joins = ['metadata', 'system_metadata'] else: manual_joins, columns_to_join = _manual_join_columns(columns_to_join) query = model_query(context, models.Instance) for column in columns_to_join: query = query.options(joinedload(column)) if not context.is_admin: # If we're not admin context, add appropriate filter.. 
        if context.project_id:
            query = query.filter_by(project_id=context.project_id)
        else:
            query = query.filter_by(user_id=context.user_id)
    instances = query.all()
    return _instances_fill_metadata(context, instances, manual_joins)


@require_context
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
                                limit=None, marker=None, columns_to_join=None,
                                use_slave=False):
    """Return instances that match all filters.  Deleted instances
    will be returned by default, unless there's a filter that says
    otherwise.

    Depending on the name of a filter, matching for that filter is
    performed using either exact matching or regular expression
    matching. Exact matching is applied for the following filters:

        ['project_id', 'user_id', 'image_ref',
         'vm_state', 'instance_type_id', 'uuid',
         'metadata', 'host', 'task_state', 'system_metadata']

    A third type of filter (also using exact matching), filters
    based on instance metadata tags when supplied under a special
    key named 'filter'.

        filters = {
            'filter': [
                {'name': 'tag-key', 'value': '<metakey>'},
                {'name': 'tag-value', 'value': '<metaval>'},
                {'name': 'tag:<metakey>', 'value': '<metaval>'}
            ]
        }

    Special keys are used to tweak the query further:

        'changes-since' - only return instances updated after the given time
        'deleted' - only return (or exclude) deleted instances
        'soft_deleted' - modify behavior of 'deleted' to either
                         include or exclude instances whose vm_state is
                         SOFT_DELETED.
    """
    sort_fn = {'desc': desc, 'asc': asc}

    if CONF.database.slave_connection == '':
        use_slave = False

    session = get_session(use_slave=use_slave)

    if columns_to_join is None:
        columns_to_join = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join = _manual_join_columns(columns_to_join)

    query_prefix = session.query(models.Instance)
    for column in columns_to_join:
        query_prefix = query_prefix.options(joinedload(column))

    query_prefix = query_prefix.order_by(sort_fn[sort_dir](
            getattr(models.Instance, sort_key)))

    # Make a copy of the filters dictionary to use going forward, as we'll
    # be modifying it and we shouldn't affect the caller's use of it.
    filters = filters.copy()

    if 'changes-since' in filters:
        changes_since = timeutils.normalize_time(filters['changes-since'])
        query_prefix = query_prefix.\
            filter(models.Instance.updated_at >= changes_since)

    if 'deleted' in filters:
        # Instances can be soft or hard deleted and the query needs to
        # include or exclude both
        if filters.pop('deleted'):
            if filters.pop('soft_deleted', True):
                deleted = or_(
                    models.Instance.deleted == models.Instance.id,
                    models.Instance.vm_state == vm_states.SOFT_DELETED
                    )
                query_prefix = query_prefix.\
                    filter(deleted)
            else:
                query_prefix = query_prefix.\
                    filter(models.Instance.deleted == models.Instance.id)
        else:
            query_prefix = query_prefix.\
                filter_by(deleted=0)
            if not filters.pop('soft_deleted', False):
                # It would be better to have vm_state not be nullable
                # but until then we test it explicitly as a workaround.
                not_soft_deleted = or_(
                    models.Instance.vm_state != vm_states.SOFT_DELETED,
                    models.Instance.vm_state == None
                    )
                query_prefix = query_prefix.filter(not_soft_deleted)

    if 'cleaned' in filters:
        if filters.pop('cleaned'):
            query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
        else:
            query_prefix = query_prefix.filter(models.Instance.cleaned == 0)

    if not context.is_admin:
        # If we're not admin context, add the appropriate filter.
        if context.project_id:
            filters['project_id'] = context.project_id
        else:
            filters['user_id'] = context.user_id
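    # As an illustration (editorial example, not from the original
    # module): a caller passing filters such as
    #
    #     {'host': 'node1', 'display_name': '^web-'}
    #
    # exercises both the exact-match path ('host') and the regexp
    # path ('display_name') handled below.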
    # Filters for exact matches that we can do along with the SQL query...
    # For other filters that don't match this, we will do regexp matching
    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
                                'vm_state', 'instance_type_id', 'uuid',
                                'metadata', 'host', 'task_state',
                                'system_metadata']

    # Filter the query
    query_prefix = exact_filter(query_prefix, models.Instance,
                                filters, exact_match_filter_names)

    query_prefix = regex_filter(query_prefix, models.Instance, filters)
    query_prefix = tag_filter(context, query_prefix, models.Instance,
                              models.InstanceMetadata,
                              models.InstanceMetadata.instance_uuid,
                              filters)

    # paginate query
    if marker is not None:
        try:
            marker = _instance_get_by_uuid(context, marker,
                                           session=session)
        except exception.InstanceNotFound:
            raise exception.MarkerNotFound(marker)
    query_prefix = sqlalchemyutils.paginate_query(query_prefix,
                                                  models.Instance, limit,
                                                  [sort_key, 'created_at',
                                                   'id'],
                                                  marker=marker,
                                                  sort_dir=sort_dir)

    return _instances_fill_metadata(context, query_prefix.all(),
                                    manual_joins)


def tag_filter(context, query, model, model_metadata,
               model_uuid, filters):
    """Applies tag filtering to a query.

    Returns the updated query.  This method alters filters to remove
    keys that are tags.  This filters on resources by tags, and it
    assumes that the caller will take care of access control.

    :param context: request context object
    :param query: query to apply filters to
    :param model: model object the query applies to
    :param model_metadata: metadata model to filter tag keys/values on
    :param model_uuid: uuid column of the metadata model that links
                       back to ``model``
    :param filters: dictionary of filters
    """
    if filters.get('filter') is None:
        return query

    or_query = None

    def _to_list(val):
        if isinstance(val, dict):
            val = val.values()
        if not isinstance(val, (tuple, list, set)):
            val = (val,)
        return val

    for filter_block in filters['filter']:
        if not isinstance(filter_block, dict):
            continue

        filter_name = filter_block.get('name')
        if filter_name is None:
            continue
        tag_name = filter_name[4:]
        tag_val = _to_list(filter_block.get('value'))

        if filter_name.startswith('tag-'):
            if tag_name not in ['key', 'value']:
                msg = _("Invalid field name: %s") % tag_name
                raise exception.InvalidParameterValue(err=msg)
            subq = getattr(model_metadata, tag_name).in_(tag_val)
            or_query = subq if or_query is None else or_(or_query, subq)
        elif filter_name.startswith('tag:'):
            subq = model_query(context, model_uuid,
                               session=query.session,
                               base_model=model_metadata).\
                filter_by(key=tag_name).\
                filter(model_metadata.value.in_(tag_val))
            query = query.filter(model.uuid.in_(subq))

    if or_query is not None:
        subq = model_query(context, model_uuid,
                           session=query.session,
                           base_model=model_metadata).\
            filter(or_query)
        query = query.filter(model.uuid.in_(subq))

    return query


def regex_filter(query, model, filters):
    """Applies regular expression filtering to a query.

    Returns the updated query.
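
    For example (illustrative), a filter key that is not handled by the
    exact-match list is applied as a regular expression against the
    matching column:

        filters = {'display_name': '^web-'}
        query = regex_filter(query, models.Instance, filters)

    On MySQL this is rendered with REGEXP; engines without a regexp
    operator fall back to LIKE '%<pattern>%'.
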
:param query: query to apply filters to :param model: model object the query applies to :param filters: dictionary of filters with regex values """ regexp_op_map = { 'postgresql': '~', 'mysql': 'REGEXP', 'sqlite': 'REGEXP' } db_string = CONF.database.connection.split(':')[0].split('+')[0] db_regexp_op = regexp_op_map.get(db_string, 'LIKE') for filter_name in filters.iterkeys(): try: column_attr = getattr(model, filter_name) except AttributeError: continue if 'property' == type(column_attr).__name__: continue if db_regexp_op == 'LIKE': query = query.filter(column_attr.op(db_regexp_op)( '%' + str(filters[filter_name]) + '%')) else: query = query.filter(column_attr.op(db_regexp_op)( str(filters[filter_name]))) return query @require_context def instance_get_active_by_window_joined(context, begin, end=None, project_id=None, host=None): """Return instances and joins that were active during window.""" session = get_session() query = session.query(models.Instance) query = query.options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) if end: query = query.filter(models.Instance.launched_at < end) if project_id: query = query.filter_by(project_id=project_id) if host: query = query.filter_by(host=host) return _instances_fill_metadata(context, query.all()) def _instance_get_all_query(context, project_only=False, joins=None, use_slave=False): if joins is None: joins = ['info_cache', 'security_groups'] query = model_query(context, models.Instance, project_only=project_only, use_slave=use_slave) for join in joins: query = query.options(joinedload(join)) return query @require_admin_context def instance_get_all_by_host(context, host, columns_to_join=None, use_slave=False): return _instances_fill_metadata(context, _instance_get_all_query(context, use_slave=use_slave).filter_by(host=host).all(), manual_joins=columns_to_join, use_slave=use_slave) def _instance_get_all_uuids_by_host(context, host, session=None): """Return a list of the instance uuids on a given host. Returns a list of UUIDs, not Instance model objects. This internal version allows you to specify a session object as a kwarg. """ uuids = [] for tuple in model_query(context, models.Instance.uuid, read_deleted="no", base_model=models.Instance, session=session).\ filter_by(host=host).\ all(): uuids.append(tuple[0]) return uuids @require_admin_context def instance_get_all_by_host_and_node(context, host, node): return _instances_fill_metadata(context, _instance_get_all_query(context, joins=[]).filter_by(host=host). filter_by(node=node).all(), manual_joins=[]) @require_admin_context def instance_get_all_by_host_and_not_type(context, host, type_id=None): return _instances_fill_metadata(context, _instance_get_all_query(context).filter_by(host=host). filter(models.Instance.instance_type_id != type_id).all()) # NOTE(jkoelker) This is only being left here for compat with floating # ips. Currently the network_api doesn't return floaters # in network_info. Once it starts return the model. 
This # function and its call in compute/manager.py on 1829 can # go away @require_context def instance_get_floating_address(context, instance_id): instance = instance_get(context, instance_id) fixed_ips = fixed_ip_get_by_instance(context, instance['uuid']) if not fixed_ips: return None # NOTE(tr3buchet): this only gets the first fixed_ip # won't find floating ips associated with other fixed_ips floating_ips = floating_ip_get_by_fixed_address(context, fixed_ips[0]['address']) if not floating_ips: return None # NOTE(vish): this just returns the first floating ip return floating_ips[0]['address'] @require_context def instance_floating_address_get_all(context, instance_uuid): if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) fixed_ip_ids = model_query(context, models.FixedIp.id, base_model=models.FixedIp).\ filter_by(instance_uuid=instance_uuid).\ all() if not fixed_ip_ids: raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid) fixed_ip_ids = [fixed_ip_id.id for fixed_ip_id in fixed_ip_ids] floating_ips = model_query(context, models.FloatingIp.address, base_model=models.FloatingIp).\ filter(models.FloatingIp.fixed_ip_id.in_(fixed_ip_ids)).\ all() return [floating_ip.address for floating_ip in floating_ips] # NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0. @require_admin_context def instance_get_all_hung_in_rebooting(context, reboot_window): reboot_window = (timeutils.utcnow() - datetime.timedelta(seconds=reboot_window)) # NOTE(danms): this is only used in the _poll_rebooting_instances() # call in compute/manager, so we can avoid the metadata lookups # explicitly return _instances_fill_metadata(context, model_query(context, models.Instance). filter(models.Instance.updated_at <= reboot_window). filter_by(task_state=task_states.REBOOTING).all(), manual_joins=[]) @require_context def instance_update(context, instance_uuid, values): instance_ref = _instance_update(context, instance_uuid, values)[1] return instance_ref @require_context def instance_update_and_get_original(context, instance_uuid, values, columns_to_join=None): """Set the given properties on an instance and update it. Return a shallow copy of the original instance reference, as well as the updated one. :param context: = request context object :param instance_uuid: = instance uuid :param values: = dict containing column values If "expected_task_state" exists in values, the update can only happen when the task state before update matches expected_task_state. Otherwise a UnexpectedTaskStateError is thrown. :returns: a tuple of the form (old_instance_ref, new_instance_ref) Raises NotFound if instance does not exist. """ return _instance_update(context, instance_uuid, values, copy_old_instance=True, columns_to_join=columns_to_join) # NOTE(danms): This updates the instance's metadata list in-place and in # the database to avoid stale data and refresh issues. It assumes the # delete=True behavior of instance_metadata_update(...) 
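# A minimal usage sketch (illustrative, not part of the original module;
# assumes an open ``session``, a freshly fetched ``instance_ref`` and a
# request context ``ctxt``):
#
#     _instance_metadata_update_in_place(ctxt, instance_ref,
#                                        'metadata',
#                                        models.InstanceMetadata,
#                                        {'role': 'web'}, session)
#
# Keys already present are updated in place, keys missing from the new
# dict are soft-deleted, and new keys are added as fresh rows.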
def _instance_metadata_update_in_place(context, instance, metadata_type, model, metadata, session): metadata = dict(metadata) to_delete = [] for keyvalue in instance[metadata_type]: key = keyvalue['key'] if key in metadata: keyvalue['value'] = metadata.pop(key) elif key not in metadata: to_delete.append(keyvalue) for condemned in to_delete: condemned.soft_delete(session=session) for key, value in metadata.iteritems(): newitem = model() newitem.update({'key': key, 'value': value, 'instance_uuid': instance['uuid']}) session.add(newitem) instance[metadata_type].append(newitem) @_retry_on_deadlock def _instance_update(context, instance_uuid, values, copy_old_instance=False, columns_to_join=None): session = get_session() if not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(instance_uuid) with session.begin(): instance_ref = _instance_get_by_uuid(context, instance_uuid, session=session, columns_to_join=columns_to_join) if "expected_task_state" in values: # it is not a db column so always pop out expected = values.pop("expected_task_state") if not isinstance(expected, (tuple, list, set)): expected = (expected,) actual_state = instance_ref["task_state"] if actual_state not in expected: if actual_state == task_states.DELETING: raise exception.UnexpectedDeletingTaskStateError( actual=actual_state, expected=expected) else: raise exception.UnexpectedTaskStateError( actual=actual_state, expected=expected) if "expected_vm_state" in values: expected = values.pop("expected_vm_state") if not isinstance(expected, (tuple, list, set)): expected = (expected,) actual_state = instance_ref["vm_state"] if actual_state not in expected: raise exception.UnexpectedVMStateError(actual=actual_state, expected=expected) instance_hostname = instance_ref['hostname'] or '' if ("hostname" in values and values["hostname"].lower() != instance_hostname.lower()): _validate_unique_server_name(context, session, values['hostname']) if copy_old_instance: old_instance_ref = copy.copy(instance_ref) else: old_instance_ref = None metadata = values.get('metadata') if metadata is not None: _instance_metadata_update_in_place(context, instance_ref, 'metadata', models.InstanceMetadata, values.pop('metadata'), session) system_metadata = values.get('system_metadata') if system_metadata is not None: _instance_metadata_update_in_place(context, instance_ref, 'system_metadata', models.InstanceSystemMetadata, values.pop('system_metadata'), session) _handle_objects_related_type_conversions(values) instance_ref.update(values) session.add(instance_ref) return (old_instance_ref, instance_ref) def instance_add_security_group(context, instance_uuid, security_group_id): """Associate the given security group with the given instance.""" sec_group_ref = models.SecurityGroupInstanceAssociation() sec_group_ref.update({'instance_uuid': instance_uuid, 'security_group_id': security_group_id}) sec_group_ref.save() @require_context def instance_remove_security_group(context, instance_uuid, security_group_id): """Disassociate the given security group from the given instance.""" model_query(context, models.SecurityGroupInstanceAssociation).\ filter_by(instance_uuid=instance_uuid).\ filter_by(security_group_id=security_group_id).\ soft_delete() ################### @require_context def instance_info_cache_get(context, instance_uuid): """Gets an instance info cache from the table. 
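
    For example (illustrative; ``ctxt`` is a request context):

        info_cache = instance_info_cache_get(ctxt, instance['uuid'])
        network_info = info_cache['network_info'] if info_cache else None
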
:param instance_uuid: = uuid of the info cache's instance :param session: = optional session object """ return model_query(context, models.InstanceInfoCache).\ filter_by(instance_uuid=instance_uuid).\ first() @require_context def instance_info_cache_update(context, instance_uuid, values): """Update an instance info cache record in the table. :param instance_uuid: = uuid of info cache's instance :param values: = dict containing column values to update :param session: = optional session object """ session = get_session() with session.begin(): info_cache = model_query(context, models.InstanceInfoCache, session=session).\ filter_by(instance_uuid=instance_uuid).\ first() if info_cache and info_cache['deleted']: raise exception.InstanceInfoCacheNotFound( instance_uuid=instance_uuid) elif not info_cache: # NOTE(tr3buchet): just in case someone blows away an instance's # cache entry, re-create it. info_cache = models.InstanceInfoCache() values['instance_uuid'] = instance_uuid try: info_cache.update(values) except db_exc.DBDuplicateEntry: # NOTE(sirp): Possible race if two greenthreads attempt to # recreate the instance cache entry at the same time. First one # wins. pass return info_cache @require_context def instance_info_cache_delete(context, instance_uuid): """Deletes an existing instance_info_cache record :param instance_uuid: = uuid of the instance tied to the cache record :param session: = optional session object """ model_query(context, models.InstanceInfoCache).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() ################### @require_context def key_pair_create(context, values): try: key_pair_ref = models.KeyPair() key_pair_ref.update(values) key_pair_ref.save() return key_pair_ref except db_exc.DBDuplicateEntry: raise exception.KeyPairExists(key_name=values['name']) @require_context def key_pair_destroy(context, user_id, name): nova.context.authorize_user_context(context, user_id) result = model_query(context, models.KeyPair).\ filter_by(user_id=user_id).\ filter_by(name=name).\ soft_delete() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) @require_context def key_pair_get(context, user_id, name): nova.context.authorize_user_context(context, user_id) result = model_query(context, models.KeyPair).\ filter_by(user_id=user_id).\ filter_by(name=name).\ first() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) return result @require_context def key_pair_get_all_by_user(context, user_id): nova.context.authorize_user_context(context, user_id) return model_query(context, models.KeyPair, read_deleted="no").\ filter_by(user_id=user_id).\ all() def key_pair_count_by_user(context, user_id): nova.context.authorize_user_context(context, user_id) return model_query(context, models.KeyPair, read_deleted="no").\ filter_by(user_id=user_id).\ count() ################### @require_admin_context def network_associate(context, project_id, network_id=None, force=False): """Associate a project with a network. 
called by project_get_networks under certain conditions and network manager add_network_to_project() only associate if the project doesn't already have a network or if force is True force solves race condition where a fresh project has multiple instance builds simultaneously picked up by multiple network hosts which attempt to associate the project with multiple networks force should only be used as a direct consequence of user request all automated requests should not use force """ session = get_session() with session.begin(): def network_query(project_filter, id=None): filter_kwargs = {'project_id': project_filter} if id is not None: filter_kwargs['id'] = id return model_query(context, models.Network, session=session, read_deleted="no").\ filter_by(**filter_kwargs).\ with_lockmode('update').\ first() if not force: # find out if project has a network network_ref = network_query(project_id) if force or not network_ref: # in force mode or project doesn't have a network so associate # with a new network # get new network network_ref = network_query(None, network_id) if not network_ref: raise exception.NoMoreNetworks() # associate with network # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues network_ref['project_id'] = project_id session.add(network_ref) return network_ref def _network_ips_query(context, network_id): return model_query(context, models.FixedIp, read_deleted="no").\ filter_by(network_id=network_id) @require_admin_context def network_count_reserved_ips(context, network_id): return _network_ips_query(context, network_id).\ filter_by(reserved=True).\ count() @require_admin_context def network_create_safe(context, values): network_ref = models.Network() network_ref['uuid'] = str(uuid.uuid4()) network_ref.update(values) try: network_ref.save() return network_ref except db_exc.DBDuplicateEntry: raise exception.DuplicateVlan(vlan=values['vlan']) @require_admin_context def network_delete_safe(context, network_id): session = get_session() with session.begin(): result = model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter_by(network_id=network_id).\ filter_by(allocated=True).\ count() if result != 0: raise exception.NetworkInUse(network_id=network_id) network_ref = _network_get(context, network_id=network_id, session=session) model_query(context, models.FixedIp, session=session, read_deleted="no").\ filter_by(network_id=network_id).\ soft_delete() session.delete(network_ref) @require_admin_context def network_disassociate(context, network_id, disassociate_host, disassociate_project): net_update = {} if disassociate_project: net_update['project_id'] = None if disassociate_host: net_update['host'] = None network_update(context, network_id, net_update) def _network_get(context, network_id, session=None, project_only='allow_none'): result = model_query(context, models.Network, session=session, project_only=project_only).\ filter_by(id=network_id).\ first() if not result: raise exception.NetworkNotFound(network_id=network_id) return result @require_context def network_get(context, network_id, project_only='allow_none'): return _network_get(context, network_id, project_only=project_only) @require_context def network_get_all(context, project_only): result = model_query(context, models.Network, read_deleted="no", project_only=project_only).all() if not result: raise exception.NoNetworksFound() return result @require_context def network_get_all_by_uuids(context, network_uuids, project_only): result = model_query(context, 
models.Network, read_deleted="no", project_only=project_only).\ filter(models.Network.uuid.in_(network_uuids)).\ all() if not result: raise exception.NoNetworksFound() #check if the result contains all the networks #we are looking for for network_uuid in network_uuids: found = False for network in result: if network['uuid'] == network_uuid: found = True break if not found: if project_only: raise exception.NetworkNotFoundForProject( network_uuid=network_uuid, project_id=context.project_id) raise exception.NetworkNotFound(network_id=network_uuid) return result # NOTE(vish): pylint complains because of the long method name, but # it fits with the names of the rest of the methods # pylint: disable=C0103 @require_admin_context def network_get_associated_fixed_ips(context, network_id, host=None): # FIXME(sirp): since this returns fixed_ips, this would be better named # fixed_ip_get_all_by_network. # NOTE(vish): The ugly joins here are to solve a performance issue and # should be removed once we can add and remove leases # without regenerating the whole list vif_and = and_(models.VirtualInterface.id == models.FixedIp.virtual_interface_id, models.VirtualInterface.deleted == 0) inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid, models.Instance.deleted == 0) session = get_session() query = session.query(models.FixedIp.address, models.FixedIp.instance_uuid, models.FixedIp.network_id, models.FixedIp.virtual_interface_id, models.VirtualInterface.address, models.Instance.hostname, models.Instance.updated_at, models.Instance.created_at, models.FixedIp.allocated, models.FixedIp.leased).\ filter(models.FixedIp.deleted == 0).\ filter(models.FixedIp.network_id == network_id).\ filter(models.FixedIp.allocated == True).\ join((models.VirtualInterface, vif_and)).\ join((models.Instance, inst_and)).\ filter(models.FixedIp.instance_uuid != None).\ filter(models.FixedIp.virtual_interface_id != None) if host: query = query.filter(models.Instance.host == host) result = query.all() data = [] for datum in result: cleaned = {} cleaned['address'] = datum[0] cleaned['instance_uuid'] = datum[1] cleaned['network_id'] = datum[2] cleaned['vif_id'] = datum[3] cleaned['vif_address'] = datum[4] cleaned['instance_hostname'] = datum[5] cleaned['instance_updated'] = datum[6] cleaned['instance_created'] = datum[7] cleaned['allocated'] = datum[8] cleaned['leased'] = datum[9] data.append(cleaned) return data def network_in_use_on_host(context, network_id, host): fixed_ips = network_get_associated_fixed_ips(context, network_id, host) return len(fixed_ips) > 0 def _network_get_query(context, session=None): return model_query(context, models.Network, session=session, read_deleted="no") @require_admin_context def network_get_by_uuid(context, uuid): result = _network_get_query(context).filter_by(uuid=uuid).first() if not result: raise exception.NetworkNotFoundForUUID(uuid=uuid) return result @require_admin_context def network_get_by_cidr(context, cidr): result = _network_get_query(context).\ filter(or_(models.Network.cidr == cidr, models.Network.cidr_v6 == cidr)).\ first() if not result: raise exception.NetworkNotFoundForCidr(cidr=cidr) return result @require_admin_context def network_get_all_by_host(context, host): session = get_session() fixed_host_filter = or_(models.FixedIp.host == host, models.Instance.host == host) fixed_ip_query = model_query(context, models.FixedIp.network_id, base_model=models.FixedIp, session=session).\ outerjoin((models.VirtualInterface, models.VirtualInterface.id == 
models.FixedIp.virtual_interface_id)).\ outerjoin((models.Instance, models.Instance.uuid == models.VirtualInterface.instance_uuid)).\ filter(fixed_host_filter) # NOTE(vish): return networks that have host set # or that have a fixed ip with host set # or that have an instance with host set host_filter = or_(models.Network.host == host, models.Network.id.in_(fixed_ip_query.subquery())) return _network_get_query(context, session=session).\ filter(host_filter).\ all() @require_admin_context def network_set_host(context, network_id, host_id): session = get_session() with session.begin(): network_ref = _network_get_query(context, session=session).\ filter_by(id=network_id).\ with_lockmode('update').\ first() if not network_ref: raise exception.NetworkNotFound(network_id=network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not network_ref['host']: network_ref['host'] = host_id session.add(network_ref) return network_ref['host'] @require_context def network_update(context, network_id, values): session = get_session() with session.begin(): network_ref = _network_get(context, network_id, session=session) network_ref.update(values) try: network_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.DuplicateVlan(vlan=values['vlan']) return network_ref ################### @require_context def quota_get(context, project_id, resource, user_id=None): model = models.ProjectUserQuota if user_id else models.Quota query = model_query(context, model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if user_id: query = query.filter_by(user_id=user_id) result = query.first() if not result: if user_id: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get_all_by_project_and_user(context, project_id, user_id): nova.context.authorize_project_context(context, project_id) user_quotas = model_query(context, models.ProjectUserQuota.resource, models.ProjectUserQuota.hard_limit, base_model=models.ProjectUserQuota).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ all() result = {'project_id': project_id, 'user_id': user_id} for quota in user_quotas: result[quota.resource] = quota.hard_limit return result @require_context def quota_get_all_by_project(context, project_id): nova.context.authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_get_all(context, project_id): nova.context.authorize_project_context(context, project_id) result = model_query(context, models.ProjectUserQuota).\ filter_by(project_id=project_id).\ all() return result @require_admin_context def quota_create(context, project_id, resource, limit, user_id=None): per_user = user_id and resource not in PER_PROJECT_QUOTAS quota_ref = models.ProjectUserQuota() if per_user else models.Quota() if per_user: quota_ref.user_id = user_id quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit try: quota_ref.save() except db_exc.DBDuplicateEntry: raise exception.QuotaExists(project_id=project_id, resource=resource) return quota_ref @require_admin_context def quota_update(context, project_id, resource, limit, user_id=None): per_user = user_id and resource not in 
PER_PROJECT_QUOTAS model = models.ProjectUserQuota if per_user else models.Quota query = model_query(context, model).\ filter_by(project_id=project_id).\ filter_by(resource=resource) if per_user: query = query.filter_by(user_id=user_id) result = query.update({'hard_limit': limit}) if not result: if per_user: raise exception.ProjectUserQuotaNotFound(project_id=project_id, user_id=user_id) else: raise exception.ProjectQuotaNotFound(project_id=project_id) ################### @require_context def quota_class_get(context, class_name, resource): result = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result def quota_class_get_default(context): rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=_DEFAULT_QUOTA_NAME).\ all() result = {'class_name': _DEFAULT_QUOTA_NAME} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_class_get_all_by_name(context, class_name): nova.context.authorize_quota_class_context(context, class_name) rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = limit quota_class_ref.save() return quota_class_ref @require_admin_context def quota_class_update(context, class_name, resource, limit): result = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ update({'hard_limit': limit}) if not result: raise exception.QuotaClassNotFound(class_name=class_name) ################### @require_context def quota_usage_get(context, project_id, resource, user_id=None): query = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource) if user_id: if resource not in PER_PROJECT_QUOTAS: result = query.filter_by(user_id=user_id).first() else: result = query.filter_by(user_id=None).first() else: result = query.first() if not result: raise exception.QuotaUsageNotFound(project_id=project_id) return result def _quota_usage_get_all(context, project_id, user_id=None): nova.context.authorize_project_context(context, project_id) query = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id) result = {'project_id': project_id} if user_id: query = query.filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id == None)) result['user_id'] = user_id rows = query.all() for row in rows: if row.resource in result: result[row.resource]['in_use'] += row.in_use result[row.resource]['reserved'] += row.reserved else: result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) return result @require_context def quota_usage_get_all_by_project_and_user(context, project_id, user_id): return _quota_usage_get_all(context, project_id, user_id=user_id) @require_context def quota_usage_get_all_by_project(context, project_id): return _quota_usage_get_all(context, project_id) def _quota_usage_create(context, project_id, user_id, resource, in_use, reserved, until_refresh, session=None): 
quota_usage_ref = models.QuotaUsage() quota_usage_ref.project_id = project_id quota_usage_ref.user_id = user_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh # updated_at is needed for judgement of max_age quota_usage_ref.updated_at = timeutils.utcnow() quota_usage_ref.save(session=session) return quota_usage_ref @require_admin_context def quota_usage_update(context, project_id, user_id, resource, **kwargs): updates = {} for key in ['in_use', 'reserved', 'until_refresh']: if key in kwargs: updates[key] = kwargs[key] result = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ filter(or_(models.QuotaUsage.user_id == user_id, models.QuotaUsage.user_id == None)).\ update(updates) if not result: raise exception.QuotaUsageNotFound(project_id=project_id) ################### def _reservation_create(context, uuid, usage, project_id, user_id, resource, delta, expire, session=None): reservation_ref = models.Reservation() reservation_ref.uuid = uuid reservation_ref.usage_id = usage['id'] reservation_ref.project_id = project_id reservation_ref.user_id = user_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.save(session=session) return reservation_ref ################### # NOTE(johannes): The quota code uses SQL locking to ensure races don't # cause under or over counting of resources. To avoid deadlocks, this # code always acquires the lock on quota_usages before acquiring the lock # on reservations. def _get_project_user_quota_usages(context, session, project_id, user_id): rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ with_lockmode('update').\ all() proj_result = dict() user_result = dict() # Get the total count of in_use,reserved for row in rows: proj_result.setdefault(row.resource, dict(in_use=0, reserved=0, total=0)) proj_result[row.resource]['in_use'] += row.in_use proj_result[row.resource]['reserved'] += row.reserved proj_result[row.resource]['total'] += (row.in_use + row.reserved) if row.user_id is None or row.user_id == user_id: user_result[row.resource] = row return proj_result, user_result @require_context @_retry_on_deadlock def quota_reserve(context, resources, project_quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): elevated = context.elevated() session = get_session() with session.begin(): if project_id is None: project_id = context.project_id if user_id is None: user_id = context.user_id # Get the current usages project_usages, user_usages = _get_project_user_quota_usages( context, session, project_id, user_id) # Handle usage refresh work = set(deltas.keys()) while work: resource = work.pop() # Do we need to refresh the usage? refresh = False if ((resource not in PER_PROJECT_QUOTAS) and (resource not in user_usages)): user_usages[resource] = _quota_usage_create(elevated, project_id, user_id, resource, 0, 0, until_refresh or None, session=session) refresh = True elif ((resource in PER_PROJECT_QUOTAS) and (resource not in user_usages)): user_usages[resource] = _quota_usage_create(elevated, project_id, None, resource, 0, 0, until_refresh or None, session=session) refresh = True elif user_usages[resource].in_use < 0: # Negative in_use count indicates a desync, so try to # heal from that... 
refresh = True elif user_usages[resource].until_refresh is not None: user_usages[resource].until_refresh -= 1 if user_usages[resource].until_refresh <= 0: refresh = True elif max_age and (user_usages[resource].updated_at - timeutils.utcnow()).seconds >= max_age: refresh = True # OK, refresh the usage if refresh: # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] updates = sync(elevated, project_id, user_id, session) for res, in_use in updates.items(): # Make sure we have a destination for the usage! if ((res not in PER_PROJECT_QUOTAS) and (res not in user_usages)): user_usages[res] = _quota_usage_create(elevated, project_id, user_id, res, 0, 0, until_refresh or None, session=session) if ((res in PER_PROJECT_QUOTAS) and (res not in user_usages)): user_usages[res] = _quota_usage_create(elevated, project_id, None, res, 0, 0, until_refresh or None, session=session) if user_usages[res].in_use != in_use: LOG.debug(_('quota_usages out of sync, updating. ' 'project_id: %(project_id)s, ' 'user_id: %(user_id)s, ' 'resource: %(res)s, ' 'tracked usage: %(tracked_use)s, ' 'actual usage: %(in_use)s'), {'project_id': project_id, 'user_id': user_id, 'res': res, 'tracked_use': user_usages[res].in_use, 'in_use': in_use}) # Update the usage user_usages[res].in_use = in_use user_usages[res].until_refresh = until_refresh or None # Because more than one resource may be refreshed # by the call to the sync routine, and we don't # want to double-sync, we make sure all refreshed # resources are dropped from the work set. work.discard(res) # NOTE(Vek): We make the assumption that the sync # routine actually refreshes the # resources that it is the sync routine # for. We don't check, because this is # a best-effort mechanism. # Check for deltas that would go negative unders = [res for res, delta in deltas.items() if delta < 0 and delta + user_usages[res].in_use < 0] # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. for key, value in user_usages.items(): if key not in project_usages: project_usages[key] = value overs = [res for res, delta in deltas.items() if user_quotas[res] >= 0 and delta >= 0 and (project_quotas[res] < delta + project_usages[res]['total'] or user_quotas[res] < delta + user_usages[res].total)] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. # Create the reservations if not overs: reservations = [] for res, delta in deltas.items(): reservation = _reservation_create(elevated, str(uuid.uuid4()), user_usages[res], project_id, user_id, res, delta, expire, session=session) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. # 2) User allocates a new instance. # 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. 
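                    # An illustrative note (editorial, not from the
                    # original module): quota_reserve() returns the
                    # reservation uuids created below; callers later move
                    # the reserved amounts into 'in_use' with
                    # reservation_commit() or release them with
                    # reservation_rollback().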
if delta > 0: user_usages[res].reserved += delta # Apply updates to the usages table for usage_ref in user_usages.values(): session.add(usage_ref) if unders: LOG.warning(_("Change will make usage less than 0 for the following " "resources: %s"), unders) if overs: if project_quotas == user_quotas: usages = project_usages else: usages = user_usages usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved'])) for k, v in usages.items()) headroom = dict((res, user_quotas[res] - (usages[res]['in_use'] + usages[res]['reserved'])) for res in user_quotas.keys()) # If quota_cores is unlimited [-1]: # - set cores headroom based on instances headroom: if user_quotas.get('cores') == -1: if deltas['cores']: hc = headroom['instances'] * deltas['cores'] headroom['cores'] = hc / deltas['instances'] else: headroom['cores'] = headroom['instances'] # If quota_ram is unlimited [-1]: # - set ram headroom based on instances headroom: if user_quotas.get('ram') == -1: if deltas['ram']: hr = headroom['instances'] * deltas['ram'] headroom['ram'] = hr / deltas['instances'] else: headroom['ram'] = headroom['instances'] raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas, usages=usages, headroom=headroom) return reservations def _quota_reservations_query(session, context, reservations): """Return the relevant reservations.""" # Get the listed reservations return model_query(context, models.Reservation, read_deleted="no", session=session).\ filter(models.Reservation.uuid.in_(reservations)).\ with_lockmode('update') @require_context @_retry_on_deadlock def reservation_commit(context, reservations, project_id=None, user_id=None): session = get_session() with session.begin(): _project_usages, user_usages = _get_project_user_quota_usages( context, session, project_id, user_id) reservation_query = _quota_reservations_query(session, context, reservations) for reservation in reservation_query.all(): usage = user_usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta usage.in_use += reservation.delta reservation_query.soft_delete(synchronize_session=False) @require_context @_retry_on_deadlock def reservation_rollback(context, reservations, project_id=None, user_id=None): session = get_session() with session.begin(): _project_usages, user_usages = _get_project_user_quota_usages( context, session, project_id, user_id) reservation_query = _quota_reservations_query(session, context, reservations) for reservation in reservation_query.all(): usage = user_usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta reservation_query.soft_delete(synchronize_session=False) @require_admin_context def quota_destroy_all_by_project_and_user(context, project_id, user_id): session = get_session() with session.begin(): model_query(context, models.ProjectUserQuota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ soft_delete(synchronize_session=False) model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ soft_delete(synchronize_session=False) model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ soft_delete(synchronize_session=False) @require_admin_context def quota_destroy_all_by_project(context, project_id): session = get_session() with session.begin(): model_query(context, models.Quota, session=session, read_deleted="no").\ 
filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.ProjectUserQuota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) @require_admin_context @_retry_on_deadlock def reservation_expire(context): session = get_session() with session.begin(): current_time = timeutils.utcnow() reservation_query = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter(models.Reservation.expire < current_time) for reservation in reservation_query.join(models.QuotaUsage).all(): if reservation.delta >= 0: reservation.usage.reserved -= reservation.delta session.add(reservation.usage) reservation_query.soft_delete(synchronize_session=False) ################### def _ec2_volume_get_query(context, session=None): return model_query(context, models.VolumeIdMapping, session=session, read_deleted='yes') def _ec2_snapshot_get_query(context, session=None): return model_query(context, models.SnapshotIdMapping, session=session, read_deleted='yes') @require_context def ec2_volume_create(context, volume_uuid, id=None): """Create ec2 compatible volume by provided uuid.""" ec2_volume_ref = models.VolumeIdMapping() ec2_volume_ref.update({'uuid': volume_uuid}) if id is not None: ec2_volume_ref.update({'id': id}) ec2_volume_ref.save() return ec2_volume_ref @require_context def get_ec2_volume_id_by_uuid(context, volume_id): result = _ec2_volume_get_query(context).\ filter_by(uuid=volume_id).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result['id'] @require_context def get_volume_uuid_by_ec2_id(context, ec2_id): result = _ec2_volume_get_query(context).\ filter_by(id=ec2_id).\ first() if not result: raise exception.VolumeNotFound(volume_id=ec2_id) return result['uuid'] @require_context def ec2_snapshot_create(context, snapshot_uuid, id=None): """Create ec2 compatible snapshot by provided uuid.""" ec2_snapshot_ref = models.SnapshotIdMapping() ec2_snapshot_ref.update({'uuid': snapshot_uuid}) if id is not None: ec2_snapshot_ref.update({'id': id}) ec2_snapshot_ref.save() return ec2_snapshot_ref @require_context def get_ec2_snapshot_id_by_uuid(context, snapshot_id): result = _ec2_snapshot_get_query(context).\ filter_by(uuid=snapshot_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result['id'] @require_context def get_snapshot_uuid_by_ec2_id(context, ec2_id): result = _ec2_snapshot_get_query(context).\ filter_by(id=ec2_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=ec2_id) return result['uuid'] ################### def _block_device_mapping_get_query(context, session=None, columns_to_join=None, use_slave=False): if columns_to_join is None: columns_to_join = [] query = model_query(context, models.BlockDeviceMapping, session=session, use_slave=use_slave) for column in columns_to_join: query = query.options(joinedload(column)) return query def _scrub_empty_str_values(dct, keys_to_scrub): """Remove any keys found in sequence keys_to_scrub from the dict if they have the value ''. 
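
    For example (illustrative):

        dct = {'volume_size': '', 'delete_on_termination': True}
        _scrub_empty_str_values(dct, ['volume_size'])
        # dct is now {'delete_on_termination': True}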
""" for key in keys_to_scrub: if key in dct and dct[key] == '': del dct[key] def _from_legacy_values(values, legacy, allow_updates=False): if legacy: if allow_updates and block_device.is_safe_for_update(values): return values else: return block_device.BlockDeviceDict.from_legacy(values) else: return values @require_context def block_device_mapping_create(context, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy) bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) bdm_ref.save() return bdm_ref @require_context def block_device_mapping_update(context, bdm_id, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy, allow_updates=True) query = _block_device_mapping_get_query(context).filter_by(id=bdm_id) query.update(values) return query.first() def block_device_mapping_update_or_create(context, values, legacy=True): _scrub_empty_str_values(values, ['volume_size']) values = _from_legacy_values(values, legacy, allow_updates=True) session = get_session() with session.begin(): result = None # NOTE(xqueralt): Only update a BDM when device_name was provided. We # allow empty device names so they will be set later by the manager. if values['device_name']: query = _block_device_mapping_get_query(context, session=session) result = query.filter_by(instance_uuid=values['instance_uuid'], device_name=values['device_name']).first() if result: result.update(values) else: # Either the device_name doesn't exist in the database yet, or no # device_name was provided. Both cases mean creating a new BDM. result = models.BlockDeviceMapping(**values) result.save(session=session) # NOTE(xqueralt): Prevent from having multiple swap devices for the # same instance. This will delete all the existing ones. if block_device.new_format_is_swap(values): query = _block_device_mapping_get_query(context, session=session) query = query.filter_by(instance_uuid=values['instance_uuid'], source_type='blank', guest_format='swap') query = query.filter(models.BlockDeviceMapping.id != result.id) query.soft_delete() return result @require_context def block_device_mapping_get_all_by_instance(context, instance_uuid, use_slave=False): return _block_device_mapping_get_query(context, use_slave=use_slave).\ filter_by(instance_uuid=instance_uuid).\ all() @require_context def block_device_mapping_get_by_volume_id(context, volume_id, columns_to_join=None): return _block_device_mapping_get_query(context, columns_to_join=columns_to_join).\ filter_by(volume_id=volume_id).\ first() @require_context def block_device_mapping_destroy(context, bdm_id): _block_device_mapping_get_query(context).\ filter_by(id=bdm_id).\ soft_delete() @require_context def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, volume_id): _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(volume_id=volume_id).\ soft_delete() @require_context def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid, device_name): _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(device_name=device_name).\ soft_delete() ################### def _security_group_create(context, values, session=None): security_group_ref = models.SecurityGroup() # FIXME(devcamcar): Unless I do this, rules fails with lazy load exception # once save() is called. This will get cleaned up in next orm pass. 
security_group_ref.rules security_group_ref.update(values) try: security_group_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.SecurityGroupExists( project_id=values['project_id'], security_group_name=values['name']) return security_group_ref def _security_group_get_query(context, session=None, read_deleted=None, project_only=False, join_rules=True): query = model_query(context, models.SecurityGroup, session=session, read_deleted=read_deleted, project_only=project_only) if join_rules: query = query.options(joinedload_all('rules.grantee_group')) return query def _security_group_get_by_names(context, session, project_id, group_names): """Get security group models for a project by a list of names. Raise SecurityGroupNotFoundForProject for a name not found. """ query = _security_group_get_query(context, session=session, read_deleted="no", join_rules=False).\ filter_by(project_id=project_id).\ filter(models.SecurityGroup.name.in_(group_names)) sg_models = query.all() if len(sg_models) == len(group_names): return sg_models # Find the first one missing and raise group_names_from_models = [x.name for x in sg_models] for group_name in group_names: if group_name not in group_names_from_models: raise exception.SecurityGroupNotFoundForProject( project_id=project_id, security_group_id=group_name) # Not Reached @require_context def security_group_get_all(context): return _security_group_get_query(context).all() @require_context def security_group_get(context, security_group_id, columns_to_join=None): query = _security_group_get_query(context, project_only=True).\ filter_by(id=security_group_id) if columns_to_join is None: columns_to_join = [] for column in columns_to_join: if column.startswith('instances'): query = query.options(joinedload_all(column)) result = query.first() if not result: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) return result @require_context def security_group_get_by_name(context, project_id, group_name, columns_to_join=None): query = _security_group_get_query(context, read_deleted="no", join_rules=False).\ filter_by(project_id=project_id).\ filter_by(name=group_name) if columns_to_join is None: columns_to_join = ['instances', 'rules.grantee_group'] for column in columns_to_join: query = query.options(joinedload_all(column)) result = query.first() if not result: raise exception.SecurityGroupNotFoundForProject( project_id=project_id, security_group_id=group_name) return result @require_context def security_group_get_by_project(context, project_id): return _security_group_get_query(context, read_deleted="no").\ filter_by(project_id=project_id).\ all() @require_context def security_group_get_by_instance(context, instance_uuid): return _security_group_get_query(context, read_deleted="no").\ join(models.SecurityGroup.instances).\ filter_by(uuid=instance_uuid).\ all() @require_context def security_group_in_use(context, group_id): session = get_session() with session.begin(): # Are there any instances that haven't been deleted # that include this group? 
inst_assoc = model_query(context, models.SecurityGroupInstanceAssociation, read_deleted="no", session=session).\ filter_by(security_group_id=group_id).\ all() for ia in inst_assoc: num_instances = model_query(context, models.Instance, session=session, read_deleted="no").\ filter_by(uuid=ia.instance_uuid).\ count() if num_instances: return True return False @require_context def security_group_create(context, values): return _security_group_create(context, values) @require_context def security_group_update(context, security_group_id, values, columns_to_join=None): session = get_session() with session.begin(): query = model_query(context, models.SecurityGroup, session=session).filter_by(id=security_group_id) if columns_to_join: for column in columns_to_join: query = query.options(joinedload_all(column)) security_group_ref = query.first() if not security_group_ref: raise exception.SecurityGroupNotFound( security_group_id=security_group_id) security_group_ref.update(values) name = security_group_ref['name'] project_id = security_group_ref['project_id'] try: security_group_ref.save(session=session) except db_exc.DBDuplicateEntry: raise exception.SecurityGroupExists( project_id=project_id, security_group_name=name) return security_group_ref def security_group_ensure_default(context): """Ensure default security group exists for a project_id.""" try: return _security_group_ensure_default(context) except exception.SecurityGroupExists: # NOTE(rpodolyaka): a concurrent transaction has succeeded first, # suppress the error and proceed return security_group_get_by_name(context, context.project_id, 'default') def _security_group_ensure_default(context, session=None): if session is None: session = get_session() with session.begin(subtransactions=True): try: default_group = _security_group_get_by_names(context, session, context.project_id, ['default'])[0] except exception.NotFound: values = {'name': 'default', 'description': 'default', 'user_id': context.user_id, 'project_id': context.project_id} default_group = _security_group_create(context, values, session=session) usage = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=context.project_id).\ filter_by(user_id=context.user_id).\ filter_by(resource='security_groups') # Create quota usage for auto created default security group if not usage.first(): elevated = context.elevated() _quota_usage_create(elevated, context.project_id, context.user_id, 'security_groups', 1, 0, None, session=session) else: usage.update({'in_use': int(usage.first().in_use) + 1}) default_rules = _security_group_rule_get_default_query(context, session=session).all() for default_rule in default_rules: # This is suboptimal, it should be programmatic to know # the values of the default_rule rule_values = {'protocol': default_rule.protocol, 'from_port': default_rule.from_port, 'to_port': default_rule.to_port, 'cidr': default_rule.cidr, 'parent_group_id': default_group.id, } _security_group_rule_create(context, rule_values, session=session) return default_group @require_context def security_group_destroy(context, security_group_id): session = get_session() with session.begin(): model_query(context, models.SecurityGroup, session=session).\ filter_by(id=security_group_id).\ soft_delete() model_query(context, models.SecurityGroupInstanceAssociation, session=session).\ filter_by(security_group_id=security_group_id).\ soft_delete() model_query(context, models.SecurityGroupIngressRule, session=session).\ filter_by(group_id=security_group_id).\ 
soft_delete() model_query(context, models.SecurityGroupIngressRule, session=session).\ filter_by(parent_group_id=security_group_id).\ soft_delete() def _security_group_count_by_project_and_user(context, project_id, user_id, session=None): nova.context.authorize_project_context(context, project_id) return model_query(context, models.SecurityGroup, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ filter_by(user_id=user_id).\ count() ################### def _security_group_rule_create(context, values, session=None): security_group_rule_ref = models.SecurityGroupIngressRule() security_group_rule_ref.update(values) security_group_rule_ref.save(session=session) return security_group_rule_ref def _security_group_rule_get_query(context, session=None): return model_query(context, models.SecurityGroupIngressRule, session=session) @require_context def security_group_rule_get(context, security_group_rule_id): result = (_security_group_rule_get_query(context). filter_by(id=security_group_rule_id). first()) if not result: raise exception.SecurityGroupNotFoundForRule( rule_id=security_group_rule_id) return result @require_context def security_group_rule_get_by_security_group(context, security_group_id, columns_to_join=None): if columns_to_join is None: columns_to_join = ['grantee_group.instances.system_metadata', 'grantee_group.instances.info_cache'] query = (_security_group_rule_get_query(context). filter_by(parent_group_id=security_group_id)) for column in columns_to_join: query = query.options(joinedload_all(column)) return query.all() @require_context def security_group_rule_get_by_security_group_grantee(context, security_group_id): return (_security_group_rule_get_query(context). filter_by(group_id=security_group_id). all()) @require_context def security_group_rule_create(context, values): return _security_group_rule_create(context, values) @require_context def security_group_rule_destroy(context, security_group_rule_id): count = (_security_group_rule_get_query(context). filter_by(id=security_group_rule_id). soft_delete()) if count == 0: raise exception.SecurityGroupNotFoundForRule( rule_id=security_group_rule_id) @require_context def security_group_rule_count_by_group(context, security_group_id): return (model_query(context, models.SecurityGroupIngressRule, read_deleted="no"). filter_by(parent_group_id=security_group_id). 
count()) # ################### def _security_group_rule_get_default_query(context, session=None): return model_query(context, models.SecurityGroupIngressDefaultRule, session=session) @require_context def security_group_default_rule_get(context, security_group_rule_default_id): result = _security_group_rule_get_default_query(context).\ filter_by(id=security_group_rule_default_id).\ first() if not result: raise exception.SecurityGroupDefaultRuleNotFound( rule_id=security_group_rule_default_id) return result @require_admin_context def security_group_default_rule_destroy(context, security_group_rule_default_id): session = get_session() with session.begin(): count = _security_group_rule_get_default_query(context, session=session).\ filter_by(id=security_group_rule_default_id).\ soft_delete() if count == 0: raise exception.SecurityGroupDefaultRuleNotFound( rule_id=security_group_rule_default_id) @require_admin_context def security_group_default_rule_create(context, values): security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule() security_group_default_rule_ref.update(values) security_group_default_rule_ref.save() return security_group_default_rule_ref @require_context def security_group_default_rule_list(context): return _security_group_rule_get_default_query(context).\ all() ################### @require_admin_context def provider_fw_rule_create(context, rule): fw_rule_ref = models.ProviderFirewallRule() fw_rule_ref.update(rule) fw_rule_ref.save() return fw_rule_ref @require_admin_context def provider_fw_rule_get_all(context): return model_query(context, models.ProviderFirewallRule).all() @require_admin_context def provider_fw_rule_destroy(context, rule_id): session = get_session() with session.begin(): session.query(models.ProviderFirewallRule).\ filter_by(id=rule_id).\ soft_delete() ################### @require_context def project_get_networks(context, project_id, associate=True): # NOTE(tr3buchet): as before this function will associate # a project with a network if it doesn't have one and # associate is true result = model_query(context, models.Network, read_deleted="no").\ filter_by(project_id=project_id).\ all() if not result: if not associate: return [] return [network_associate(context, project_id)] return result ################### @require_admin_context def migration_create(context, values): migration = models.Migration() migration.update(values) migration.save() return migration @require_admin_context def migration_update(context, id, values): session = get_session() with session.begin(): migration = _migration_get(context, id, session=session) migration.update(values) return migration def _migration_get(context, id, session=None): result = model_query(context, models.Migration, session=session, read_deleted="yes").\ filter_by(id=id).\ first() if not result: raise exception.MigrationNotFound(migration_id=id) return result @require_admin_context def migration_get(context, id): return _migration_get(context, id) @require_admin_context def migration_get_by_instance_and_status(context, instance_uuid, status): result = model_query(context, models.Migration, read_deleted="yes").\ filter_by(instance_uuid=instance_uuid).\ filter_by(status=status).\ first() if not result: raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid, status=status) return result @require_admin_context def migration_get_unconfirmed_by_dest_compute(context, confirm_window, dest_compute, use_slave=False): confirm_window = (timeutils.utcnow() - datetime.timedelta(seconds=confirm_window)) 
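# Only migrations whose updated_at falls at or before this cutoff, i.e. # untouched for at least confirm_window seconds, count as unconfirmed.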
return model_query(context, models.Migration, read_deleted="yes", use_slave=use_slave).\ filter(models.Migration.updated_at <= confirm_window).\ filter_by(status="finished").\ filter_by(dest_compute=dest_compute).\ all() @require_admin_context def migration_get_in_progress_by_host_and_node(context, host, node): return model_query(context, models.Migration).\ filter(or_(and_(models.Migration.source_compute == host, models.Migration.source_node == node), and_(models.Migration.dest_compute == host, models.Migration.dest_node == node))).\ filter(~models.Migration.status.in_(['confirmed', 'reverted', 'error'])).\ options(joinedload_all('instance.system_metadata')).\ all() @require_admin_context def migration_get_all_by_filters(context, filters): query = model_query(context, models.Migration) if "status" in filters: query = query.filter(models.Migration.status == filters["status"]) if "host" in filters: host = filters["host"] query = query.filter(or_(models.Migration.source_compute == host, models.Migration.dest_compute == host)) return query.all() ################## def console_pool_create(context, values): pool = models.ConsolePool() pool.update(values) try: pool.save() except db_exc.DBDuplicateEntry: raise exception.ConsolePoolExists( host=values["host"], console_type=values["console_type"], compute_host=values["compute_host"], ) return pool def console_pool_get_by_host_type(context, compute_host, host, console_type): result = model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ filter_by(compute_host=compute_host).\ options(joinedload('consoles')).\ first() if not result: raise exception.ConsolePoolNotFoundForHostType( host=host, console_type=console_type, compute_host=compute_host) return result def console_pool_get_all_by_host_type(context, host, console_type): return model_query(context, models.ConsolePool, read_deleted="no").\ filter_by(host=host).\ filter_by(console_type=console_type).\ options(joinedload('consoles')).\ all() def console_create(context, values): console = models.Console() console.update(values) console.save() return console def console_delete(context, console_id): session = get_session() with session.begin(): # NOTE(mdragon): consoles are meant to be transient. 
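# so they are hard-deleted here instead of the soft_delete() used for # most other rows in this module.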
session.query(models.Console).\ filter_by(id=console_id).\ delete() def console_get_by_pool_instance(context, pool_id, instance_uuid): result = model_query(context, models.Console, read_deleted="yes").\ filter_by(pool_id=pool_id).\ filter_by(instance_uuid=instance_uuid).\ options(joinedload('pool')).\ first() if not result: raise exception.ConsoleNotFoundInPoolForInstance( pool_id=pool_id, instance_uuid=instance_uuid) return result def console_get_all_by_instance(context, instance_uuid, columns_to_join=None): query = model_query(context, models.Console, read_deleted="yes").\ filter_by(instance_uuid=instance_uuid) if columns_to_join: for column in columns_to_join: query = query.options(joinedload(column)) return query.all() def console_get(context, console_id, instance_uuid=None): query = model_query(context, models.Console, read_deleted="yes").\ filter_by(id=console_id).\ options(joinedload('pool')) if instance_uuid is not None: query = query.filter_by(instance_uuid=instance_uuid) result = query.first() if not result: if instance_uuid: raise exception.ConsoleNotFoundForInstance( console_id=console_id, instance_uuid=instance_uuid) else: raise exception.ConsoleNotFound(console_id=console_id) return result ################## @require_admin_context def flavor_create(context, values, projects=None): """Create a new instance type. In order to pass in extra specs, the values dict should contain an 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ specs = values.get('extra_specs') specs_refs = [] if specs: for k, v in specs.iteritems(): specs_ref = models.InstanceTypeExtraSpecs() specs_ref['key'] = k specs_ref['value'] = v specs_refs.append(specs_ref) values['extra_specs'] = specs_refs instance_type_ref = models.InstanceTypes() instance_type_ref.update(values) if projects is None: projects = [] session = get_session() with session.begin(): try: instance_type_ref.save() except db_exc.DBDuplicateEntry as e: if 'flavorid' in e.columns: raise exception.FlavorIdExists(flavor_id=values['flavorid']) raise exception.FlavorExists(name=values['name']) except Exception as e: raise db_exc.DBError(e) for project in set(projects): access_ref = models.InstanceTypeProjects() access_ref.update({"instance_type_id": instance_type_ref.id, "project_id": project}) access_ref.save() return _dict_with_extra_specs(instance_type_ref) def _dict_with_extra_specs(inst_type_query): """Takes an instance or instance type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) for x in inst_type_query['extra_specs']]) inst_type_dict['extra_specs'] = extra_specs return inst_type_dict def _flavor_get_query(context, session=None, read_deleted=None): query = model_query(context, models.InstanceTypes, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')) if not context.is_admin: the_filter = [models.InstanceTypes.is_public == True] the_filter.extend([ models.InstanceTypes.projects.any(project_id=context.project_id) ]) query = query.filter(or_(*the_filter)) return query @require_context def flavor_get_all(context, inactive=False, filters=None, sort_key='flavorid', sort_dir='asc', limit=None, marker=None): """Returns all flavors.
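Results can optionally be filtered by min_memory_mb, min_root_gb, disabled and is_public, and paginated with limit/marker.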
""" filters = filters or {} # FIXME(sirp): now that we have the `disabled` field for flavors, we # should probably remove the use of `deleted` to mark inactive. `deleted` # should mean truly deleted, e.g. we can safely purge the record out of the # database. read_deleted = "yes" if inactive else "no" sort_fn = {'desc': desc, 'asc': asc} query = _flavor_get_query(context, read_deleted=read_deleted) if 'min_memory_mb' in filters: query = query.filter( models.InstanceTypes.memory_mb >= filters['min_memory_mb']) if 'min_root_gb' in filters: query = query.filter( models.InstanceTypes.root_gb >= filters['min_root_gb']) if 'disabled' in filters: query = query.filter( models.InstanceTypes.disabled == filters['disabled']) if 'is_public' in filters and filters['is_public'] is not None: the_filter = [models.InstanceTypes.is_public == filters['is_public']] if filters['is_public'] and context.project_id is not None: the_filter.extend([ models.InstanceTypes.projects.any( project_id=context.project_id, deleted=0) ]) if len(the_filter) > 1: query = query.filter(or_(*the_filter)) else: query = query.filter(the_filter[0]) marker_row = None if marker is not None: marker_row = _flavor_get_query(context, read_deleted=read_deleted).\ filter_by(flavorid=marker).\ first() if not marker_row: raise exception.MarkerNotFound(marker) query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit, [sort_key, 'id'], marker=marker_row, sort_dir=sort_dir) inst_types = query.all() return [_dict_with_extra_specs(i) for i in inst_types] def _flavor_get_id_from_flavor_query(context, flavor_id, session=None): return model_query(context, models.InstanceTypes.id, read_deleted="no", session=session, base_model=models.InstanceTypes).\ filter_by(flavorid=flavor_id) def _flavor_get_id_from_flavor(context, flavor_id, session=None): result = _flavor_get_id_from_flavor_query(context, flavor_id, session=session).\ first() if not result: raise exception.FlavorNotFound(flavor_id=flavor_id) return result[0] @require_context def flavor_get(context, id): """Returns a dict describing specific flavor.""" result = _flavor_get_query(context).\ filter_by(id=id).\ first() if not result: raise exception.FlavorNotFound(flavor_id=id) return _dict_with_extra_specs(result) @require_context def flavor_get_by_name(context, name): """Returns a dict describing specific flavor.""" result = _flavor_get_query(context).\ filter_by(name=name).\ first() if not result: raise exception.FlavorNotFoundByName(flavor_name=name) return _dict_with_extra_specs(result) @require_context def flavor_get_by_flavor_id(context, flavor_id, read_deleted): """Returns a dict describing specific flavor_id.""" result = _flavor_get_query(context, read_deleted=read_deleted).\ filter_by(flavorid=flavor_id).\ order_by(asc("deleted"), asc("id")).\ first() if not result: raise exception.FlavorNotFound(flavor_id=flavor_id) return _dict_with_extra_specs(result) @require_admin_context def flavor_destroy(context, name): """Marks specific flavor as deleted.""" session = get_session() with session.begin(): ref = model_query(context, models.InstanceTypes, session=session, read_deleted="no").\ filter_by(name=name).\ first() if not ref: raise exception.FlavorNotFoundByName(flavor_name=name) ref.soft_delete(session=session) model_query(context, models.InstanceTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(instance_type_id=ref['id']).\ soft_delete() model_query(context, models.InstanceTypeProjects, session=session, read_deleted="no").\ 
filter_by(instance_type_id=ref['id']).\ soft_delete() def _flavor_access_query(context, session=None): return model_query(context, models.InstanceTypeProjects, session=session, read_deleted="no") @require_admin_context def flavor_access_get_by_flavor_id(context, flavor_id): """Get flavor access list by flavor id.""" instance_type_id_subq = \ _flavor_get_id_from_flavor_query(context, flavor_id) access_refs = _flavor_access_query(context).\ filter_by(instance_type_id=instance_type_id_subq).\ all() return access_refs @require_admin_context def flavor_access_add(context, flavor_id, project_id): """Add given tenant to the flavor access list.""" instance_type_id = _flavor_get_id_from_flavor(context, flavor_id) access_ref = models.InstanceTypeProjects() access_ref.update({"instance_type_id": instance_type_id, "project_id": project_id}) try: access_ref.save() except db_exc.DBDuplicateEntry: raise exception.FlavorAccessExists(flavor_id=flavor_id, project_id=project_id) return access_ref @require_admin_context def flavor_access_remove(context, flavor_id, project_id): """Remove given tenant from the flavor access list.""" instance_type_id = _flavor_get_id_from_flavor(context, flavor_id) count = _flavor_access_query(context).\ filter_by(instance_type_id=instance_type_id).\ filter_by(project_id=project_id).\ soft_delete(synchronize_session=False) if count == 0: raise exception.FlavorAccessNotFound(flavor_id=flavor_id, project_id=project_id) def _flavor_extra_specs_get_query(context, flavor_id, session=None): instance_type_id_subq = \ _flavor_get_id_from_flavor_query(context, flavor_id) return model_query(context, models.InstanceTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(instance_type_id=instance_type_id_subq) @require_context def flavor_extra_specs_get(context, flavor_id): rows = _flavor_extra_specs_get_query(context, flavor_id).all() return dict([(row['key'], row['value']) for row in rows]) @require_context def flavor_extra_specs_get_item(context, flavor_id, key): result = _flavor_extra_specs_get_query(context, flavor_id).\ filter(models.InstanceTypeExtraSpecs.key == key).\ first() if not result: raise exception.FlavorExtraSpecsNotFound( extra_specs_key=key, flavor_id=flavor_id) return {result["key"]: result["value"]} @require_context def flavor_extra_specs_delete(context, flavor_id, key): result = _flavor_extra_specs_get_query(context, flavor_id).\ filter(models.InstanceTypeExtraSpecs.key == key).\ soft_delete(synchronize_session=False) # did not find the extra spec if result == 0: raise exception.FlavorExtraSpecsNotFound( extra_specs_key=key, flavor_id=flavor_id) @require_context def flavor_extra_specs_update_or_create(context, flavor_id, specs, max_retries=10): for attempt in xrange(max_retries): try: session = get_session() with session.begin(): instance_type_id = _flavor_get_id_from_flavor(context, flavor_id, session) spec_refs = model_query(context, models.InstanceTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(instance_type_id=instance_type_id).\ filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\ all() existing_keys = set() for spec_ref in spec_refs: key = spec_ref["key"] existing_keys.add(key) spec_ref.update({"value": specs[key]}) for key, value in specs.iteritems(): if key in existing_keys: continue spec_ref = models.InstanceTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "instance_type_id": instance_type_id}) session.add(spec_ref) return specs except db_exc.DBDuplicateEntry: # a concurrent transaction has been committed, # try 
again unless this was the last attempt if attempt == max_retries - 1: raise #################### @require_admin_context def cell_create(context, values): cell = models.Cell() cell.update(values) try: cell.save() except db_exc.DBDuplicateEntry: raise exception.CellExists(name=values['name']) return cell def _cell_get_by_name_query(context, cell_name, session=None): return model_query(context, models.Cell, session=session).filter_by(name=cell_name) @require_admin_context def cell_update(context, cell_name, values): session = get_session() with session.begin(): cell_query = _cell_get_by_name_query(context, cell_name, session=session) if not cell_query.update(values): raise exception.CellNotFound(cell_name=cell_name) cell = cell_query.first() return cell @require_admin_context def cell_delete(context, cell_name): return _cell_get_by_name_query(context, cell_name).soft_delete() @require_admin_context def cell_get(context, cell_name): result = _cell_get_by_name_query(context, cell_name).first() if not result: raise exception.CellNotFound(cell_name=cell_name) return result @require_admin_context def cell_get_all(context): return model_query(context, models.Cell, read_deleted="no").all() ######################## # User-provided metadata def _instance_metadata_get_multi(context, instance_uuids, session=None, use_slave=False): if not instance_uuids: return [] return model_query(context, models.InstanceMetadata, session=session, use_slave=use_slave).\ filter( models.InstanceMetadata.instance_uuid.in_(instance_uuids)) def _instance_metadata_get_query(context, instance_uuid, session=None): return model_query(context, models.InstanceMetadata, session=session, read_deleted="no").\ filter_by(instance_uuid=instance_uuid) @require_context def instance_metadata_get(context, instance_uuid): rows = _instance_metadata_get_query(context, instance_uuid).all() return dict((row['key'], row['value']) for row in rows) @require_context @_retry_on_deadlock def instance_metadata_delete(context, instance_uuid, key): _instance_metadata_get_query(context, instance_uuid).\ filter_by(key=key).\ soft_delete() @require_context @_retry_on_deadlock def instance_metadata_update(context, instance_uuid, metadata, delete): all_keys = metadata.keys() session = get_session() with session.begin(subtransactions=True): if delete: _instance_metadata_get_query(context, instance_uuid, session=session).\ filter(~models.InstanceMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) already_existing_keys = [] meta_refs = _instance_metadata_get_query(context, instance_uuid, session=session).\ filter(models.InstanceMetadata.key.in_(all_keys)).\ all() for meta_ref in meta_refs: already_existing_keys.append(meta_ref.key) meta_ref.update({"value": metadata[meta_ref.key]}) new_keys = set(all_keys) - set(already_existing_keys) for key in new_keys: meta_ref = models.InstanceMetadata() meta_ref.update({"key": key, "value": metadata[key], "instance_uuid": instance_uuid}) session.add(meta_ref) return metadata ####################### # System-owned metadata def _instance_system_metadata_get_multi(context, instance_uuids, session=None, use_slave=False): if not instance_uuids: return [] return model_query(context, models.InstanceSystemMetadata, session=session, use_slave=use_slave).\ filter( models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids)) def _instance_system_metadata_get_query(context, instance_uuid, session=None): return model_query(context, models.InstanceSystemMetadata, session=session).\ 
filter_by(instance_uuid=instance_uuid) @require_context def instance_system_metadata_get(context, instance_uuid): rows = _instance_system_metadata_get_query(context, instance_uuid).all() return dict((row['key'], row['value']) for row in rows) @require_context def instance_system_metadata_update(context, instance_uuid, metadata, delete): all_keys = metadata.keys() session = get_session() with session.begin(subtransactions=True): if delete: _instance_system_metadata_get_query(context, instance_uuid, session=session).\ filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) already_existing_keys = [] meta_refs = _instance_system_metadata_get_query(context, instance_uuid, session=session).\ filter(models.InstanceSystemMetadata.key.in_(all_keys)).\ all() for meta_ref in meta_refs: already_existing_keys.append(meta_ref.key) meta_ref.update({"value": metadata[meta_ref.key]}) new_keys = set(all_keys) - set(already_existing_keys) for key in new_keys: meta_ref = models.InstanceSystemMetadata() meta_ref.update({"key": key, "value": metadata[key], "instance_uuid": instance_uuid}) session.add(meta_ref) return metadata #################### @require_admin_context def agent_build_create(context, values): agent_build_ref = models.AgentBuild() agent_build_ref.update(values) try: agent_build_ref.save() except db_exc.DBDuplicateEntry: raise exception.AgentBuildExists(hypervisor=values['hypervisor'], os=values['os'], architecture=values['architecture']) return agent_build_ref @require_admin_context def agent_build_get_by_triple(context, hypervisor, os, architecture): return model_query(context, models.AgentBuild, read_deleted="no").\ filter_by(hypervisor=hypervisor).\ filter_by(os=os).\ filter_by(architecture=architecture).\ first() @require_admin_context def agent_build_get_all(context, hypervisor=None): if hypervisor: return model_query(context, models.AgentBuild, read_deleted="no").\ filter_by(hypervisor=hypervisor).\ all() else: return model_query(context, models.AgentBuild, read_deleted="no").\ all() @require_admin_context def agent_build_destroy(context, agent_build_id): rows_affected = model_query(context, models.AgentBuild).filter_by( id=agent_build_id).soft_delete() if rows_affected == 0: raise exception.AgentBuildNotFound(id=agent_build_id) @require_admin_context def agent_build_update(context, agent_build_id, values): rows_affected = model_query(context, models.AgentBuild).\ filter_by(id=agent_build_id).\ update(values) if rows_affected == 0: raise exception.AgentBuildNotFound(id=agent_build_id) #################### @require_context def bw_usage_get(context, uuid, start_period, mac, use_slave=False): return model_query(context, models.BandwidthUsage, read_deleted="yes", use_slave=use_slave).\ filter_by(start_period=start_period).\ filter_by(uuid=uuid).\ filter_by(mac=mac).\ first() @require_context def bw_usage_get_by_uuids(context, uuids, start_period): return model_query(context, models.BandwidthUsage, read_deleted="yes").\ filter(models.BandwidthUsage.uuid.in_(uuids)).\ filter_by(start_period=start_period).\ all() @require_context @_retry_on_deadlock def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out, last_ctr_in, last_ctr_out, last_refreshed=None): session = get_session() if last_refreshed is None: last_refreshed = timeutils.utcnow() # NOTE(comstud): More often than not, we'll be updating records vs # creating records. Optimize accordingly, trying to update existing # records. Fall back to creation when no rows are updated. 
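# The block below is effectively an upsert: try an UPDATE first and only # INSERT when no row matched; a DBDuplicateEntry raised by a racing insert # is swallowed because the first writer wins.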
with session.begin(): values = {'last_refreshed': last_refreshed, 'last_ctr_in': last_ctr_in, 'last_ctr_out': last_ctr_out, 'bw_in': bw_in, 'bw_out': bw_out} rows = model_query(context, models.BandwidthUsage, session=session, read_deleted="yes").\ filter_by(start_period=start_period).\ filter_by(uuid=uuid).\ filter_by(mac=mac).\ update(values, synchronize_session=False) if rows: return bwusage = models.BandwidthUsage() bwusage.start_period = start_period bwusage.uuid = uuid bwusage.mac = mac bwusage.last_refreshed = last_refreshed bwusage.bw_in = bw_in bwusage.bw_out = bw_out bwusage.last_ctr_in = last_ctr_in bwusage.last_ctr_out = last_ctr_out try: bwusage.save(session=session) except db_exc.DBDuplicateEntry: # NOTE(sirp): Possible race if two greenthreads attempt to create # the usage entry at the same time. First one wins. pass #################### @require_context def vol_get_usage_by_time(context, begin): """Return volume usage records that have been updated after a specified time.""" return model_query(context, models.VolumeUsage, read_deleted="yes").\ filter(or_(models.VolumeUsage.tot_last_refreshed == None, models.VolumeUsage.tot_last_refreshed > begin, models.VolumeUsage.curr_last_refreshed == None, models.VolumeUsage.curr_last_refreshed > begin, )).\ all() @require_context def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes, instance_id, project_id, user_id, availability_zone, update_totals=False): session = get_session() refreshed = timeutils.utcnow() with session.begin(): values = {} # NOTE(dricco): We will be mostly updating current usage records vs # updating total or creating records. Optimize accordingly. if not update_totals: values = {'curr_last_refreshed': refreshed, 'curr_reads': rd_req, 'curr_read_bytes': rd_bytes, 'curr_writes': wr_req, 'curr_write_bytes': wr_bytes, 'instance_uuid': instance_id, 'project_id': project_id, 'user_id': user_id, 'availability_zone': availability_zone} else: values = {'tot_last_refreshed': refreshed, 'tot_reads': models.VolumeUsage.tot_reads + rd_req, 'tot_read_bytes': models.VolumeUsage.tot_read_bytes + rd_bytes, 'tot_writes': models.VolumeUsage.tot_writes + wr_req, 'tot_write_bytes': models.VolumeUsage.tot_write_bytes + wr_bytes, 'curr_reads': 0, 'curr_read_bytes': 0, 'curr_writes': 0, 'curr_write_bytes': 0, 'instance_uuid': instance_id, 'project_id': project_id, 'user_id': user_id, 'availability_zone': availability_zone} current_usage = model_query(context, models.VolumeUsage, session=session, read_deleted="yes").\ filter_by(volume_id=id).\ first() if current_usage: if (rd_req < current_usage['curr_reads'] or rd_bytes < current_usage['curr_read_bytes'] or wr_req < current_usage['curr_writes'] or wr_bytes < current_usage['curr_write_bytes']): LOG.info(_("Volume(%s) has lower stats than what is in " "the database. Instance must have been rebooted " "or crashed. 
Updating totals.") % id) if not update_totals: values['tot_reads'] = (models.VolumeUsage.tot_reads + current_usage['curr_reads']) values['tot_read_bytes'] = ( models.VolumeUsage.tot_read_bytes + current_usage['curr_read_bytes']) values['tot_writes'] = (models.VolumeUsage.tot_writes + current_usage['curr_writes']) values['tot_write_bytes'] = ( models.VolumeUsage.tot_write_bytes + current_usage['curr_write_bytes']) else: values['tot_reads'] = (models.VolumeUsage.tot_reads + current_usage['curr_reads'] + rd_req) values['tot_read_bytes'] = ( models.VolumeUsage.tot_read_bytes + current_usage['curr_read_bytes'] + rd_bytes) values['tot_writes'] = (models.VolumeUsage.tot_writes + current_usage['curr_writes'] + wr_req) values['tot_write_bytes'] = ( models.VolumeUsage.tot_write_bytes + current_usage['curr_write_bytes'] + wr_bytes) current_usage.update(values) current_usage.save(session=session) session.refresh(current_usage) return current_usage vol_usage = models.VolumeUsage() vol_usage.volume_id = id vol_usage.instance_uuid = instance_id vol_usage.project_id = project_id vol_usage.user_id = user_id vol_usage.availability_zone = availability_zone if not update_totals: vol_usage.curr_last_refreshed = refreshed vol_usage.curr_reads = rd_req vol_usage.curr_read_bytes = rd_bytes vol_usage.curr_writes = wr_req vol_usage.curr_write_bytes = wr_bytes else: vol_usage.tot_last_refreshed = refreshed vol_usage.tot_reads = rd_req vol_usage.tot_read_bytes = rd_bytes vol_usage.tot_writes = wr_req vol_usage.tot_write_bytes = wr_bytes vol_usage.save(session=session) return vol_usage #################### def s3_image_get(context, image_id): """Find local s3 image represented by the provided id.""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(id=image_id).\ first() if not result: raise exception.ImageNotFound(image_id=image_id) return result def s3_image_get_by_uuid(context, image_uuid): """Find local s3 image represented by the provided uuid.""" result = model_query(context, models.S3Image, read_deleted="yes").\ filter_by(uuid=image_uuid).\ first() if not result: raise exception.ImageNotFound(image_id=image_uuid) return result def s3_image_create(context, image_uuid): """Create local s3 image represented by provided uuid.""" try: s3_image_ref = models.S3Image() s3_image_ref.update({'uuid': image_uuid}) s3_image_ref.save() except Exception as e: raise db_exc.DBError(e) return s3_image_ref #################### def _aggregate_get_query(context, model_class, id_field=None, id=None, session=None, read_deleted=None): columns_to_join = {models.Aggregate: ['_hosts', '_metadata']} query = model_query(context, model_class, session=session, read_deleted=read_deleted) for c in columns_to_join.get(model_class, []): query = query.options(joinedload(c)) if id and id_field: query = query.filter(id_field == id) return query def aggregate_create(context, values, metadata=None): session = get_session() query = _aggregate_get_query(context, models.Aggregate, models.Aggregate.name, values['name'], session=session, read_deleted='no') aggregate = query.first() if not aggregate: aggregate = models.Aggregate() aggregate.update(values) aggregate.save(session=session) # We don't want these to be lazy loaded later. We know there is # nothing here since we just created this aggregate. 
aggregate._hosts = [] aggregate._metadata = [] else: raise exception.AggregateNameExists(aggregate_name=values['name']) if metadata: aggregate_metadata_add(context, aggregate.id, metadata) return aggregate_get(context, aggregate.id) def aggregate_get(context, aggregate_id): query = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id) aggregate = query.first() if not aggregate: raise exception.AggregateNotFound(aggregate_id=aggregate_id) return aggregate def aggregate_get_by_host(context, host, key=None): """Return rows that match host (mandatory) and metadata key (optional). :param host: matches host, and is required. :param key: matches metadata key, if not None. """ query = model_query(context, models.Aggregate) query = query.options(joinedload('_hosts')) query = query.options(joinedload('_metadata')) query = query.join('_hosts') query = query.filter(models.AggregateHost.host == host) if key: query = query.join("_metadata").filter( models.AggregateMetadata.key == key) return query.all() def aggregate_metadata_get_by_host(context, host, key=None): query = model_query(context, models.Aggregate) query = query.join("_hosts") query = query.join("_metadata") query = query.filter(models.AggregateHost.host == host) query = query.options(contains_eager("_metadata")) if key: query = query.filter(models.AggregateMetadata.key == key) rows = query.all() metadata = collections.defaultdict(set) for agg in rows: for kv in agg._metadata: metadata[kv['key']].add(kv['value']) return dict(metadata) def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key): query = model_query(context, models.Aggregate) query = query.join("_metadata") query = query.filter(models.Aggregate.id == aggregate_id) query = query.options(contains_eager("_metadata")) query = query.filter(models.AggregateMetadata.key == key) rows = query.all() metadata = collections.defaultdict(set) for agg in rows: for kv in agg._metadata: metadata[kv['key']].add(kv['value']) return dict(metadata) def aggregate_host_get_by_metadata_key(context, key): query = model_query(context, models.Aggregate) query = query.join("_metadata") query = query.filter(models.AggregateMetadata.key == key) query = query.options(contains_eager("_metadata")) query = query.options(joinedload("_hosts")) rows = query.all() metadata = collections.defaultdict(set) for agg in rows: for agghost in agg._hosts: metadata[agghost.host].add(agg._metadata[0]['value']) return dict(metadata) def aggregate_update(context, aggregate_id, values): session = get_session() if "name" in values: aggregate_by_name = (_aggregate_get_query(context, models.Aggregate, models.Aggregate.name, values['name'], session=session, read_deleted='no').first()) if aggregate_by_name and aggregate_by_name.id != aggregate_id: # there is another aggregate with the new name raise exception.AggregateNameExists(aggregate_name=values['name']) aggregate = (_aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, session=session).first()) set_delete = True if aggregate: if "availability_zone" in values: az = values.pop('availability_zone') if 'metadata' not in values: values['metadata'] = {'availability_zone': az} set_delete = False else: values['metadata']['availability_zone'] = az metadata = values.get('metadata') if metadata is not None: aggregate_metadata_add(context, aggregate_id, values.pop('metadata'), set_delete=set_delete) aggregate.update(values) aggregate.save(session=session) values['metadata'] = metadata return aggregate_get(context, 
aggregate.id) else: raise exception.AggregateNotFound(aggregate_id=aggregate_id) def aggregate_delete(context, aggregate_id): session = get_session() with session.begin(): count = _aggregate_get_query(context, models.Aggregate, models.Aggregate.id, aggregate_id, session=session).\ soft_delete() if count == 0: raise exception.AggregateNotFound(aggregate_id=aggregate_id) #Delete Metadata model_query(context, models.AggregateMetadata, session=session).\ filter_by(aggregate_id=aggregate_id).\ soft_delete() def aggregate_get_all(context): return _aggregate_get_query(context, models.Aggregate).all() def _aggregate_metadata_get_query(context, aggregate_id, session=None, read_deleted="yes"): return model_query(context, models.AggregateMetadata, read_deleted=read_deleted, session=session).\ filter_by(aggregate_id=aggregate_id) @require_aggregate_exists def aggregate_metadata_get(context, aggregate_id): rows = model_query(context, models.AggregateMetadata).\ filter_by(aggregate_id=aggregate_id).all() return dict([(r['key'], r['value']) for r in rows]) @require_aggregate_exists def aggregate_metadata_delete(context, aggregate_id, key): count = _aggregate_get_query(context, models.AggregateMetadata, models.AggregateMetadata.aggregate_id, aggregate_id).\ filter_by(key=key).\ soft_delete() if count == 0: raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id, metadata_key=key) @require_aggregate_exists def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False, max_retries=10): all_keys = metadata.keys() for attempt in xrange(max_retries): try: session = get_session() with session.begin(): query = _aggregate_metadata_get_query(context, aggregate_id, read_deleted='no', session=session) if set_delete: query.filter(~models.AggregateMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) query = \ query.filter(models.AggregateMetadata.key.in_(all_keys)) already_existing_keys = set() for meta_ref in query.all(): key = meta_ref.key meta_ref.update({"value": metadata[key]}) already_existing_keys.add(key) for key, value in metadata.iteritems(): if key in already_existing_keys: continue meta_ref = models.AggregateMetadata() meta_ref.update({"key": key, "value": value, "aggregate_id": aggregate_id}) session.add(meta_ref) return metadata except db_exc.DBDuplicateEntry: # a concurrent transaction has been committed, # try again unless this was the last attempt with excutils.save_and_reraise_exception() as ctxt: if attempt < max_retries - 1: ctxt.reraise = False else: msg = _("Add metadata failed for aggregate %(id)s after " "%(retries)s retries") % {"id": aggregate_id, "retries": max_retries} LOG.warn(msg) @require_aggregate_exists def aggregate_host_get_all(context, aggregate_id): rows = model_query(context, models.AggregateHost).\ filter_by(aggregate_id=aggregate_id).all() return [r.host for r in rows] @require_aggregate_exists def aggregate_host_delete(context, aggregate_id, host): count = _aggregate_get_query(context, models.AggregateHost, models.AggregateHost.aggregate_id, aggregate_id).\ filter_by(host=host).\ soft_delete() if count == 0: raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, host=host) @require_aggregate_exists def aggregate_host_add(context, aggregate_id, host): host_ref = models.AggregateHost() host_ref.update({"host": host, "aggregate_id": aggregate_id}) try: host_ref.save() except db_exc.DBDuplicateEntry: raise exception.AggregateHostExists(host=host, aggregate_id=aggregate_id) return host_ref ################ def 
instance_fault_create(context, values): """Create a new InstanceFault.""" fault_ref = models.InstanceFault() fault_ref.update(values) fault_ref.save() return dict(fault_ref.iteritems()) def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance faults for the provided instance_uuids.""" if not instance_uuids: return {} rows = model_query(context, models.InstanceFault, read_deleted='no').\ filter(models.InstanceFault.instance_uuid.in_( instance_uuids)).\ order_by(desc("created_at"), desc("id")).\ all() output = {} for instance_uuid in instance_uuids: output[instance_uuid] = [] for row in rows: data = dict(row.iteritems()) output[row['instance_uuid']].append(data) return output ################## def action_start(context, values): convert_objects_related_datetimes(values, 'start_time') action_ref = models.InstanceAction() action_ref.update(values) action_ref.save() return action_ref def action_finish(context, values): convert_objects_related_datetimes(values, 'start_time', 'finish_time') session = get_session() with session.begin(): action_ref = model_query(context, models.InstanceAction, session=session).\ filter_by(instance_uuid=values['instance_uuid']).\ filter_by(request_id=values['request_id']).\ first() if not action_ref: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) action_ref.update(values) return action_ref def actions_get(context, instance_uuid): """Get all instance actions for the provided uuid.""" actions = model_query(context, models.InstanceAction).\ filter_by(instance_uuid=instance_uuid).\ order_by(desc("created_at"), desc("id")).\ all() return actions def action_get_by_request_id(context, instance_uuid, request_id): """Get the action by request_id and given instance.""" action = _action_get_by_request_id(context, instance_uuid, request_id) return action def _action_get_by_request_id(context, instance_uuid, request_id, session=None): result = model_query(context, models.InstanceAction, session=session).\ filter_by(instance_uuid=instance_uuid).\ filter_by(request_id=request_id).\ first() return result def action_event_start(context, values): """Start an event on an instance action.""" convert_objects_related_datetimes(values, 'start_time') session = get_session() with session.begin(): action = _action_get_by_request_id(context, values['instance_uuid'], values['request_id'], session) if not action: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) values['action_id'] = action['id'] event_ref = models.InstanceActionEvent() event_ref.update(values) session.add(event_ref) return event_ref def action_event_finish(context, values): """Finish an event on an instance action.""" convert_objects_related_datetimes(values, 'start_time', 'finish_time') session = get_session() with session.begin(): action = _action_get_by_request_id(context, values['instance_uuid'], values['request_id'], session) if not action: raise exception.InstanceActionNotFound( request_id=values['request_id'], instance_uuid=values['instance_uuid']) event_ref = model_query(context, models.InstanceActionEvent, session=session).\ filter_by(action_id=action['id']).\ filter_by(event=values['event']).\ first() if not event_ref: raise exception.InstanceActionEventNotFound(action_id=action['id'], event=values['event']) event_ref.update(values) if values['result'].lower() == 'error': action.update({'message': 'Error'}) return event_ref def action_events_get(context, 
action_id): events = model_query(context, models.InstanceActionEvent).\ filter_by(action_id=action_id).\ order_by(desc("created_at"), desc("id")).\ all() return events def action_event_get_by_id(context, action_id, event_id): event = model_query(context, models.InstanceActionEvent).\ filter_by(action_id=action_id).\ filter_by(id=event_id).\ first() return event ################## @require_context def ec2_instance_create(context, instance_uuid, id=None): """Create ec2 compatible instance by provided uuid.""" ec2_instance_ref = models.InstanceIdMapping() ec2_instance_ref.update({'uuid': instance_uuid}) if id is not None: ec2_instance_ref.update({'id': id}) ec2_instance_ref.save() return ec2_instance_ref @require_context def get_ec2_instance_id_by_uuid(context, instance_id): result = _ec2_instance_get_query(context).\ filter_by(uuid=instance_id).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_id) return result['id'] @require_context def get_instance_uuid_by_ec2_id(context, ec2_id): result = _ec2_instance_get_query(context).\ filter_by(id=ec2_id).\ first() if not result: raise exception.InstanceNotFound(instance_id=ec2_id) return result['uuid'] def _ec2_instance_get_query(context, session=None): return model_query(context, models.InstanceIdMapping, session=session, read_deleted='yes') def _task_log_get_query(context, task_name, period_beginning, period_ending, host=None, state=None, session=None): query = model_query(context, models.TaskLog, session=session).\ filter_by(task_name=task_name).\ filter_by(period_beginning=period_beginning).\ filter_by(period_ending=period_ending) if host is not None: query = query.filter_by(host=host) if state is not None: query = query.filter_by(state=state) return query @require_admin_context def task_log_get(context, task_name, period_beginning, period_ending, host, state=None): return _task_log_get_query(context, task_name, period_beginning, period_ending, host, state).first() @require_admin_context def task_log_get_all(context, task_name, period_beginning, period_ending, host=None, state=None): return _task_log_get_query(context, task_name, period_beginning, period_ending, host, state).all() @require_admin_context def task_log_begin_task(context, task_name, period_beginning, period_ending, host, task_items=None, message=None): task = models.TaskLog() task.task_name = task_name task.period_beginning = period_beginning task.period_ending = period_ending task.host = host task.state = "RUNNING" if message: task.message = message if task_items: task.task_items = task_items try: task.save() except db_exc.DBDuplicateEntry: raise exception.TaskAlreadyRunning(task_name=task_name, host=host) @require_admin_context def task_log_end_task(context, task_name, period_beginning, period_ending, host, errors, message=None): values = dict(state="DONE", errors=errors) if message: values["message"] = message session = get_session() with session.begin(): rows = _task_log_get_query(context, task_name, period_beginning, period_ending, host, session=session).\ update(values) if rows == 0: #It's not running! raise exception.TaskNotRunning(task_name=task_name, host=host) def _get_default_deleted_value(table): # TODO(dripton): It would be better to introspect the actual default value # from the column, but I don't see a way to do that in the low-level APIs # of SQLAlchemy 0.7. 0.8 has better introspection APIs, which we should # use when Nova is ready to require 0.8. # NOTE(mikal): this is a little confusing. 
This method returns the value # that a _not_deleted_ row would have. deleted_column_type = table.c.deleted.type if isinstance(deleted_column_type, Integer): return 0 elif isinstance(deleted_column_type, Boolean): return False elif isinstance(deleted_column_type, String): return "" else: return None @require_admin_context def archive_deleted_rows_for_table(context, tablename, max_rows): """Move up to max_rows rows from one table to the corresponding shadow table. The context argument is only used for the decorator. :returns: number of rows archived """ # NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils # imports nova.db.sqlalchemy.api. from nova.db.sqlalchemy import utils as db_utils engine = get_engine() conn = engine.connect() metadata = MetaData() metadata.bind = engine table = Table(tablename, metadata, autoload=True) default_deleted_value = _get_default_deleted_value(table) shadow_tablename = _SHADOW_TABLE_PREFIX + tablename rows_archived = 0 try: shadow_table = Table(shadow_tablename, metadata, autoload=True) except NoSuchTableError: # No corresponding shadow table; skip it. return rows_archived if tablename == "dns_domains": # We have one table (dns_domains) where the key is called # "domain" rather than "id" column = table.c.domain column_name = "domain" else: column = table.c.id column_name = "id" # NOTE(guochbo): Use InsertFromSelect and DeleteFromSelect to avoid # database's limit of maximum parameter in one SQL statement. query_insert = select([table], table.c.deleted != default_deleted_value).\ order_by(column).limit(max_rows) query_delete = select([column], table.c.deleted != default_deleted_value).\ order_by(column).limit(max_rows) insert_statement = db_utils.InsertFromSelect(shadow_table, query_insert) delete_statement = db_utils.DeleteFromSelect(table, query_delete, column) try: # Group the insert and delete in a transaction. with conn.begin(): result_insert = conn.execute(insert_statement) result_delete = conn.execute(delete_statement) except IntegrityError: # A foreign key constraint keeps us from deleting some of # these rows until we clean up a dependent table. Just # skip this table for now; we'll come back to it later. msg = _("IntegrityError detected when archiving table %s") % tablename LOG.warn(msg) return rows_archived rows_archived = result_delete.rowcount return rows_archived @require_admin_context def archive_deleted_rows(context, max_rows=None): """Move up to max_rows rows from production tables to the corresponding shadow tables. :returns: Number of rows archived. """ # The context argument is only used for the decorator. 
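# NOTE: max_rows is a total budget shared across all tables; each table is # offered only the remaining budget and the loop stops once it is spent.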
tablenames = [] for model_class in models.__dict__.itervalues(): if hasattr(model_class, "__tablename__"): tablenames.append(model_class.__tablename__) rows_archived = 0 for tablename in tablenames: rows_archived += archive_deleted_rows_for_table(context, tablename, max_rows=max_rows - rows_archived) if rows_archived >= max_rows: break return rows_archived #################### def _instance_group_get_query(context, model_class, id_field=None, id=None, session=None, read_deleted=None): columns_to_join = {models.InstanceGroup: ['_policies', '_metadata', '_members']} query = model_query(context, model_class, session=session, read_deleted=read_deleted) for c in columns_to_join.get(model_class, []): query = query.options(joinedload(c)) if id and id_field: query = query.filter(id_field == id) return query def instance_group_create(context, values, policies=None, metadata=None, members=None): """Create a new group with metadata.""" uuid = values.get('uuid', None) if uuid is None: uuid = uuidutils.generate_uuid() values['uuid'] = uuid session = get_session() with session.begin(): try: group = models.InstanceGroup() group.update(values) group.save(session=session) except db_exc.DBDuplicateEntry: raise exception.InstanceGroupIdExists(group_uuid=uuid) # We don't want these to be lazy loaded later. We know there is # nothing here since we just created this instance group. group._policies = [] group._metadata = [] group._members = [] if policies: _instance_group_policies_add(context, group.id, policies, session=session) if metadata: _instance_group_metadata_add(context, group.id, metadata, session=session) if members: _instance_group_members_add(context, group.id, members, session=session) return instance_group_get(context, uuid) def instance_group_get(context, group_uuid): """Get a specific group by uuid.""" group = _instance_group_get_query(context, models.InstanceGroup, models.InstanceGroup.uuid, group_uuid).\ first() if not group: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) return group def instance_group_update(context, group_uuid, values): """Update the attributes of a group. If values contains a metadata key, it updates the group metadata too. Similarly for the policies and members. 
""" session = get_session() with session.begin(): group = model_query(context, models.InstanceGroup, session=session).\ filter_by(uuid=group_uuid).\ first() if not group: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) policies = values.get('policies') if policies is not None: _instance_group_policies_add(context, group.id, values.pop('policies'), set_delete=True, session=session) metadata = values.get('metadata') if metadata is not None: _instance_group_metadata_add(context, group.id, values.pop('metadata'), set_delete=True, session=session) members = values.get('members') if members is not None: _instance_group_members_add(context, group.id, values.pop('members'), set_delete=True, session=session) group.update(values) if policies: values['policies'] = policies if metadata: values['metadata'] = metadata if members: values['members'] = members def instance_group_delete(context, group_uuid): """Delete an group.""" session = get_session() with session.begin(): group_id = _instance_group_id(context, group_uuid, session=session) count = _instance_group_get_query(context, models.InstanceGroup, models.InstanceGroup.uuid, group_uuid, session=session).soft_delete() if count == 0: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) # Delete policies, metadata and members instance_models = [models.InstanceGroupPolicy, models.InstanceGroupMetadata, models.InstanceGroupMember] for model in instance_models: model_query(context, model, session=session).\ filter_by(group_id=group_id).\ soft_delete() def instance_group_get_all(context): """Get all groups.""" return _instance_group_get_query(context, models.InstanceGroup).all() def instance_group_get_all_by_project_id(context, project_id): """Get all groups.""" return _instance_group_get_query(context, models.InstanceGroup).\ filter_by(project_id=project_id).\ all() def _instance_group_model_get_query(context, model_class, group_id, session=None, read_deleted='no'): return model_query(context, model_class, read_deleted=read_deleted, session=session).\ filter_by(group_id=group_id) def _instance_group_id(context, group_uuid, session=None): """Returns the group database ID for the group UUID.""" result = model_query(context, models.InstanceGroup.id, base_model=models.InstanceGroup, session=session).\ filter_by(uuid=group_uuid).\ first() if not result: raise exception.InstanceGroupNotFound(group_uuid=group_uuid) return result.id def _instance_group_metadata_add(context, id, metadata, set_delete=False, session=None): if not session: session = get_session() with session.begin(subtransactions=True): all_keys = metadata.keys() query = _instance_group_model_get_query(context, models.InstanceGroupMetadata, id, session=session) if set_delete: query.filter(~models.InstanceGroupMetadata.key.in_(all_keys)).\ soft_delete(synchronize_session=False) query = query.filter(models.InstanceGroupMetadata.key.in_(all_keys)) already_existing_keys = set() for meta_ref in query.all(): key = meta_ref.key meta_ref.update({'value': metadata[key]}) already_existing_keys.add(key) for key, value in metadata.iteritems(): if key in already_existing_keys: continue meta_ref = models.InstanceGroupMetadata() meta_ref.update({'key': key, 'value': value, 'group_id': id}) session.add(meta_ref) return metadata def instance_group_metadata_add(context, group_uuid, metadata, set_delete=False): id = _instance_group_id(context, group_uuid) return _instance_group_metadata_add(context, id, metadata, set_delete=set_delete) def instance_group_metadata_delete(context, group_uuid, key): 
id = _instance_group_id(context, group_uuid) count = _instance_group_get_query(context, models.InstanceGroupMetadata, models.InstanceGroupMetadata.group_id, id).\ filter_by(key=key).\ soft_delete() if count == 0: raise exception.InstanceGroupMetadataNotFound(group_uuid=group_uuid, metadata_key=key) def instance_group_metadata_get(context, group_uuid): id = _instance_group_id(context, group_uuid) rows = model_query(context, models.InstanceGroupMetadata.key, models.InstanceGroupMetadata.value, base_model=models.InstanceGroupMetadata).\ filter_by(group_id=id).all() return dict((r[0], r[1]) for r in rows) def _instance_group_members_add(context, id, members, set_delete=False, session=None): if not session: session = get_session() all_members = set(members) with session.begin(subtransactions=True): query = _instance_group_model_get_query(context, models.InstanceGroupMember, id, session=session) if set_delete: query.filter(~models.InstanceGroupMember.instance_id.in_( all_members)).\ soft_delete(synchronize_session=False) query = query.filter( models.InstanceGroupMember.instance_id.in_(all_members)) already_existing = set() for member_ref in query.all(): already_existing.add(member_ref.instance_id) for instance_id in members: if instance_id in already_existing: continue member_ref = models.InstanceGroupMember() member_ref.update({'instance_id': instance_id, 'group_id': id}) session.add(member_ref) return members def instance_group_members_add(context, group_uuid, members, set_delete=False): id = _instance_group_id(context, group_uuid) return _instance_group_members_add(context, id, members, set_delete=set_delete) def instance_group_member_delete(context, group_uuid, instance_id): id = _instance_group_id(context, group_uuid) count = _instance_group_get_query(context, models.InstanceGroupMember, models.InstanceGroupMember.group_id, id).\ filter_by(instance_id=instance_id).\ soft_delete() if count == 0: raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid, instance_id=instance_id) def instance_group_members_get(context, group_uuid): id = _instance_group_id(context, group_uuid) instances = model_query(context, models.InstanceGroupMember.instance_id, base_model=models.InstanceGroupMember).\ filter_by(group_id=id).all() return [instance[0] for instance in instances] def _instance_group_policies_add(context, id, policies, set_delete=False, session=None): if not session: session = get_session() allpols = set(policies) with session.begin(subtransactions=True): query = _instance_group_model_get_query(context, models.InstanceGroupPolicy, id, session=session) if set_delete: query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\ soft_delete(synchronize_session=False) query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols)) already_existing = set() for policy_ref in query.all(): already_existing.add(policy_ref.policy) for policy in policies: if policy in already_existing: continue policy_ref = models.InstanceGroupPolicy() policy_ref.update({'policy': policy, 'group_id': id}) session.add(policy_ref) return policies def instance_group_policies_add(context, group_uuid, policies, set_delete=False): id = _instance_group_id(context, group_uuid) return _instance_group_policies_add(context, id, policies, set_delete=set_delete) def instance_group_policy_delete(context, group_uuid, policy): id = _instance_group_id(context, group_uuid) count = _instance_group_get_query(context, models.InstanceGroupPolicy, models.InstanceGroupPolicy.group_id, id).\ filter_by(policy=policy).\ soft_delete() 
    if count == 0:
        raise exception.InstanceGroupPolicyNotFound(group_uuid=group_uuid,
                                                    policy=policy)


def instance_group_policies_get(context, group_uuid):
    id = _instance_group_id(context, group_uuid)
    policies = model_query(context,
                           models.InstanceGroupPolicy.policy,
                           base_model=models.InstanceGroupPolicy).\
                    filter_by(group_id=id).all()
    return [policy[0] for policy in policies]


####################


@require_admin_context
def pci_device_get_by_addr(context, node_id, dev_addr):
    pci_dev_ref = model_query(context, models.PciDevice).\
                        filter_by(compute_node_id=node_id).\
                        filter_by(address=dev_addr).\
                        first()
    if not pci_dev_ref:
        raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
    return pci_dev_ref


@require_admin_context
def pci_device_get_by_id(context, id):
    pci_dev_ref = model_query(context, models.PciDevice).\
                        filter_by(id=id).\
                        first()
    if not pci_dev_ref:
        raise exception.PciDeviceNotFoundById(id=id)
    return pci_dev_ref


@require_admin_context
def pci_device_get_all_by_node(context, node_id):
    return model_query(context, models.PciDevice).\
                       filter_by(compute_node_id=node_id).\
                       all()


@require_context
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
    return model_query(context, models.PciDevice).\
                       filter_by(status='allocated').\
                       filter_by(instance_uuid=instance_uuid).\
                       all()


def _instance_pcidevs_get_multi(context, instance_uuids, session=None):
    return model_query(context, models.PciDevice, session=session).\
        filter_by(status='allocated').\
        filter(models.PciDevice.instance_uuid.in_(instance_uuids))


@require_admin_context
def pci_device_destroy(context, node_id, address):
    result = model_query(context, models.PciDevice).\
                         filter_by(compute_node_id=node_id).\
                         filter_by(address=address).\
                         soft_delete()
    if not result:
        raise exception.PciDeviceNotFound(node_id=node_id, address=address)


@require_admin_context
def pci_device_update(context, node_id, address, values):
    session = get_session()
    with session.begin():
        device = model_query(context, models.PciDevice, session=session,
                             read_deleted="no").\
                        filter_by(compute_node_id=node_id).\
                        filter_by(address=address).\
                        first()
        if not device:
            device = models.PciDevice()
        device.update(values)
        session.add(device)
    return device
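# NOTE(editor): illustrative usage sketch, not part of the original module;
# the node id, PCI address and values below are invented. Note the upsert
# behaviour implemented above: when no matching row exists, a fresh
# PciDevice model is created before the values are applied.
#
#     pci_device_update(ctxt, node_id=1, address='0000:00:1f.2',
#                       values={'vendor_id': '8086',
#                               'product_id': '1521',
#                               'status': 'available'})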
nova-2014.1.5/nova/db/sqlalchemy/migration.py0000664000567000056700000000554612540642543022113 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
import sqlalchemy

from nova.db.sqlalchemy import api as db_session
from nova import exception
from nova.openstack.common.gettextutils import _

INIT_VERSION = 215
_REPOSITORY = None

get_engine = db_session.get_engine


def db_sync(version=None):
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.NovaException(_("version should be an integer"))

    current_version = db_version()
    repository = _find_migrate_repo()
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository,
                                        version)


def db_version():
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            db_version_control(INIT_VERSION)
            return versioning_api.db_version(get_engine(), repository)
        else:
            # Some pre-Essex DB's may not be version controlled.
            # Require them to upgrade using Essex first.
            raise exception.NovaException(
                _("Upgrade DB using Essex release first."))


def db_initial_version():
    return INIT_VERSION


def db_version_control(version=None):
    repository = _find_migrate_repo()
    versioning_api.version_control(get_engine(), repository, version)
    return version


def _find_migrate_repo():
    """Get the path for the migrate repository."""
    global _REPOSITORY
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                        'migrate_repo')
    assert os.path.exists(path)
    if _REPOSITORY is None:
        _REPOSITORY = Repository(path)
    return _REPOSITORY
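# NOTE(editor): minimal usage sketch appended for illustration; it is not
# part of the original module. db_sync() with version=None upgrades to the
# newest migration script; a version lower than db_version() triggers a
# downgrade instead.
#
#     from nova.db.sqlalchemy import migration
#
#     current = migration.db_version()   # e.g. 215 on a freshly synced DB
#     migration.db_sync()                # upgrade to the latest revision
#     migration.db_sync(current)         # ...or roll back to where we were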
nova-2014.1.5/nova/db/sqlalchemy/utils.py0000664000567000056700000005604312540642543021270 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import re

from migrate.changeset import UniqueConstraint, ForeignKeyConstraint
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.exc import OperationalError
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import schema
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy.sql import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType

from nova.db.sqlalchemy import api as db
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils

LOG = logging.getLogger(__name__)


def get_table(engine, name):
    """Returns an sqlalchemy table dynamically from db.

    Needed because the models don't work for us in migrations
    as models will be far out of sync with the current data.
    """
    metadata = MetaData()
    metadata.bind = engine
    return Table(name, metadata, autoload=True)


class InsertFromSelect(UpdateBase):
    def __init__(self, table, select):
        self.table = table
        self.select = select


@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
    return "INSERT INTO %s %s" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.select))


class DeleteFromSelect(UpdateBase):
    def __init__(self, table, select, column):
        self.table = table
        self.select = select
        self.column = column


# NOTE(guochbo): some versions of MySQL don't yet support subqueries with
# 'LIMIT & IN/ALL/ANY/SOME'. We need to work around this with a nested
# select.
@compiles(DeleteFromSelect)
def visit_delete_from_select(element, compiler, **kw):
    return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
        compiler.process(element.table, asfrom=True),
        compiler.process(element.column),
        element.column.name,
        compiler.process(element.select))
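# NOTE(editor): hedged usage sketch (not in the original file) showing how
# the DeleteFromSelect construct above is meant to be driven; the helper
# name and row limit are invented. The compiled statement wraps the inner
# select in a derived table (T1), which is what lets MySQL accept LIMIT
# inside IN (...).
#
#     def _purge_some_rows(migrate_engine, table, max_rows):
#         rows = select([table.c.id]).limit(max_rows)
#         delete = DeleteFromSelect(table, rows, table.c.id)
#         migrate_engine.execute(delete)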
def _get_not_supported_column(col_name_col_instance, column_name):
    try:
        column = col_name_col_instance[column_name]
    except Exception:
        msg = _("Please specify column %s in col_name_col_instance "
                "param. It is required because the column has a type "
                "unsupported by sqlite.")
        raise exception.NovaException(msg % column_name)

    if not isinstance(column, Column):
        msg = _("col_name_col_instance param has wrong type of "
                "column instance for column %s. It should be an instance "
                "of sqlalchemy.Column.")
        raise exception.NovaException(msg % column_name)
    return column


def _get_unique_constraints_in_sqlite(migrate_engine, table_name):
    regexp = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"

    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)

    sql_data = migrate_engine.execute(
        """
            SELECT sql
            FROM
                sqlite_master
            WHERE
                type = 'table' AND
                name = :table_name;
        """,
        table_name=table_name
    ).fetchone()[0]

    uniques = set([
        schema.UniqueConstraint(
            *[getattr(table.c, c.strip(' "')) for c in cols.split(",")],
            name=name
        )
        for name, cols in re.findall(regexp, sql_data)
    ])

    return uniques


def _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
                                      **col_name_col_instance):
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)

    table = Table(table_name, meta, autoload=True)

    columns = []
    for column in table.columns:
        if isinstance(column.type, NullType):
            new_column = _get_not_supported_column(col_name_col_instance,
                                                   column.name)
            columns.append(new_column)
        else:
            columns.append(column.copy())

    uniques = _get_unique_constraints_in_sqlite(migrate_engine, table_name)
    table.constraints.update(uniques)

    constraints = [constraint for constraint in table.constraints
                   if not constraint.name == uc_name and
                   not isinstance(constraint, schema.ForeignKeyConstraint)]

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"],
                             *column_names,
                             unique=index["unique"]))

    f_keys = []
    for fk in insp.get_foreign_keys(table_name):
        refcolumns = [fk['referred_table'] + '.' + col
                      for col in fk['referred_columns']]
        f_keys.append(ForeignKeyConstraint(fk['constrained_columns'],
                      refcolumns, table=new_table, name=fk['name']))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)
    table.drop()

    [index.create(migrate_engine) for index in indexes]
    for fkey in f_keys:
        fkey.create()
    new_table.rename(table_name)


def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
                           **col_name_col_instance):
    """This method drops a UC from a table and works for mysql,
    postgresql and sqlite. In mysql and postgresql we are able to use
    the "alter table" construction. In sqlite there is only one way to
    drop a UC:
        1) Create a new table with the same columns, indexes and
           constraints (except the one that we want to drop).
        2) Copy data from old table to new.
        3) Drop old table.
        4) Rename new table to the name of old table.

    :param migrate_engine: sqlalchemy engine
    :param table_name:     name of table that contains uniq constraint.
    :param uc_name:        name of uniq constraint that will be dropped.
    :param columns:        columns that are in uniq constraint.
    :param col_name_col_instance:   contains pair column_name=column_instance.
                            column_instance is an instance of Column. These
                            params are required only for columns that have a
                            type unsupported by sqlite. For example
                            BigInteger.
    """
    if migrate_engine.name == "sqlite":
        _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name,
                                          **col_name_col_instance)
    else:
        meta = MetaData()
        meta.bind = migrate_engine
        t = Table(table_name, meta, autoload=True)
        uc = UniqueConstraint(*columns, table=t, name=uc_name)
        uc.drop()
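# NOTE(editor): usage sketch for illustration only; the engine URL, table
# and constraint names are invented and not part of the original source.
#
#     from sqlalchemy import create_engine
#
#     engine = create_engine('sqlite:///nova.sqlite')
#     drop_unique_constraint(engine, 'instance_types',
#                            'uniq_instance_types0name0deleted',
#                            'name', 'deleted')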
""" if migrate_engine.name == "sqlite": _drop_unique_constraint_in_sqlite(migrate_engine, table_name, uc_name, **col_name_col_instance) else: meta = MetaData() meta.bind = migrate_engine t = Table(table_name, meta, autoload=True) uc = UniqueConstraint(*columns, table=t, name=uc_name) uc.drop() def drop_old_duplicate_entries_from_table(migrate_engine, table_name, use_soft_delete, *uc_column_names): """This method is used to drop all old rows that have the same values for columns in uc_columns. """ meta = MetaData() meta.bind = migrate_engine table = Table(table_name, meta, autoload=True) columns_for_group_by = [table.c[name] for name in uc_column_names] columns_for_select = [func.max(table.c.id)] columns_for_select.extend(list(columns_for_group_by)) duplicated_rows_select = select(columns_for_select, group_by=columns_for_group_by, having=func.count(table.c.id) > 1) for row in migrate_engine.execute(duplicated_rows_select): # NOTE(boris-42): Do not remove row that has the biggest ID. delete_condition = table.c.id != row[0] for name in uc_column_names: delete_condition &= table.c[name] == row[name] rows_to_delete_select = select([table.c.id]).where(delete_condition) for row in migrate_engine.execute(rows_to_delete_select).fetchall(): LOG.info(_("Deleted duplicated row with id: %(id)s from table: " "%(table)s") % dict(id=row[0], table=table_name)) if use_soft_delete: delete_statement = table.update().\ where(delete_condition).\ values({ 'deleted': literal_column('id'), 'updated_at': literal_column('updated_at'), 'deleted_at': timeutils.utcnow() }) else: delete_statement = table.delete().where(delete_condition) migrate_engine.execute(delete_statement) def check_shadow_table(migrate_engine, table_name): """This method checks that table with ``table_name`` and corresponding shadow table have same columns. """ meta = MetaData() meta.bind = migrate_engine table = Table(table_name, meta, autoload=True) shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta, autoload=True) columns = dict([(c.name, c) for c in table.columns]) shadow_columns = dict([(c.name, c) for c in shadow_table.columns]) for name, column in columns.iteritems(): if name not in shadow_columns: raise exception.NovaException( _("Missing column %(table)s.%(column)s in shadow table") % {'column': name, 'table': shadow_table.name}) shadow_column = shadow_columns[name] if not isinstance(shadow_column.type, type(column.type)): raise exception.NovaException( _("Different types in %(table)s.%(column)s and shadow table: " "%(c_type)s %(shadow_c_type)s") % {'column': name, 'table': table.name, 'c_type': column.type, 'shadow_c_type': shadow_column.type}) for name, column in shadow_columns.iteritems(): if name not in columns: raise exception.NovaException( _("Extra column %(table)s.%(column)s in shadow table") % {'column': name, 'table': shadow_table.name}) return True def create_shadow_table(migrate_engine, table_name=None, table=None, **col_name_col_instance): """This method create shadow table for table with name ``table_name`` or table instance ``table``. :param table_name: Autoload table with this name and create shadow table :param table: Autoloaded table, so just create corresponding shadow table. :param col_name_col_instance: contains pair column_name=column_instance. column_instance is instance of Column. These params are required only for columns that have unsupported types by sqlite. For example BigInteger. :returns: The created shadow_table object. 
""" meta = MetaData(bind=migrate_engine) if table_name is None and table is None: raise exception.NovaException(_("Specify `table_name` or `table` " "param")) if not (table_name is None or table is None): raise exception.NovaException(_("Specify only one param `table_name` " "`table`")) if table is None: table = Table(table_name, meta, autoload=True) columns = [] for column in table.columns: if isinstance(column.type, NullType): new_column = _get_not_supported_column(col_name_col_instance, column.name) columns.append(new_column) else: columns.append(column.copy()) shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name shadow_table = Table(shadow_table_name, meta, *columns, mysql_engine='InnoDB') try: shadow_table.create() return shadow_table except (OperationalError, ProgrammingError): LOG.info(repr(shadow_table)) LOG.exception(_('Exception while creating table.')) raise exception.ShadowTableExists(name=shadow_table_name) except Exception: LOG.info(repr(shadow_table)) LOG.exception(_('Exception while creating table.')) def _get_default_deleted_value(table): if isinstance(table.c.id.type, Integer): return 0 if isinstance(table.c.id.type, String): return "" raise exception.NovaException(_("Unsupported id columns type")) def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes): table = get_table(migrate_engine, table_name) insp = reflection.Inspector.from_engine(migrate_engine) real_indexes = insp.get_indexes(table_name) existing_index_names = dict([(index['name'], index['column_names']) for index in real_indexes]) # NOTE(boris-42): Restore indexes on `deleted` column for index in indexes: if 'deleted' not in index['column_names']: continue name = index['name'] if name in existing_index_names: column_names = [table.c[c] for c in existing_index_names[name]] old_index = Index(name, *column_names, unique=index["unique"]) old_index.drop(migrate_engine) column_names = [table.c[c] for c in index['column_names']] new_index = Index(index["name"], *column_names, unique=index["unique"]) new_index.create(migrate_engine) def change_deleted_column_type_to_boolean(migrate_engine, table_name, **col_name_col_instance): if migrate_engine.name == "sqlite": return _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, **col_name_col_instance) insp = reflection.Inspector.from_engine(migrate_engine) indexes = insp.get_indexes(table_name) table = get_table(migrate_engine, table_name) old_deleted = Column('old_deleted', Boolean, default=False) old_deleted.create(table, populate_default=False) table.update().\ where(table.c.deleted == table.c.id).\ values(old_deleted=True).\ execute() table.c.deleted.drop() table.c.old_deleted.alter(name="deleted") _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes) def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name, **col_name_col_instance): insp = reflection.Inspector.from_engine(migrate_engine) table = get_table(migrate_engine, table_name) columns = [] for column in table.columns: column_copy = None if column.name != "deleted": if isinstance(column.type, NullType): column_copy = _get_not_supported_column(col_name_col_instance, column.name) else: column_copy = column.copy() else: column_copy = Column('deleted', Boolean, default=0) columns.append(column_copy) constraints = [constraint.copy() for constraint in table.constraints] meta = MetaData(bind=migrate_engine) new_table = Table(table_name + "__tmp__", meta, *(columns + constraints)) new_table.create() indexes = [] for index in 
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
                                          **col_name_col_instance):
    if migrate_engine.name == "sqlite":
        return _change_deleted_column_type_to_id_type_sqlite(migrate_engine,
                                                    table_name,
                                                    **col_name_col_instance)
    insp = reflection.Inspector.from_engine(migrate_engine)
    indexes = insp.get_indexes(table_name)

    table = get_table(migrate_engine, table_name)

    new_deleted = Column('new_deleted', table.c.id.type,
                         default=_get_default_deleted_value(table))
    new_deleted.create(table, populate_default=True)

    table.update().\
            where(table.c.deleted == True).\
            values(new_deleted=table.c.id).\
            execute()
    table.c.deleted.drop()
    table.c.new_deleted.alter(name="deleted")

    _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
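# NOTE(editor): sketch of the soft-delete convention the converters above
# and below migrate between, added for illustration (table object is
# assumed to be autoloaded). With the id-type scheme, live rows keep
# deleted == 0 and a soft-deleted row stores its own id in `deleted`, so a
# UNIQUE(name, deleted) constraint still allows re-creating a name that has
# been soft-deleted more than once:
#
#     table.update().where(table.c.id == 42).\
#         values(deleted=table.c.id).execute()          # soft delete row 42
#     select([table]).where(table.c.deleted == 0)       # only live rows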
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
                                                  **col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop a column with check
    #                 constraints in a sqlite DB and our `deleted` column has
    #                 2 check constraints. So there is only one way to remove
    #                 these constraints:
    #                 1) Create new table with the same columns, constraints
    #                    and indexes. (except deleted column).
    #                 2) Copy all data from old to new table.
    #                 3) Drop old table.
    #                 4) Rename new table to old table name.
    insp = reflection.Inspector.from_engine(migrate_engine)
    meta = MetaData(bind=migrate_engine)
    table = Table(table_name, meta, autoload=True)
    default_deleted_value = _get_default_deleted_value(table)

    columns = []
    for column in table.columns:
        column_copy = None
        if column.name != "deleted":
            if isinstance(column.type, NullType):
                column_copy = _get_not_supported_column(col_name_col_instance,
                                                        column.name)
            else:
                column_copy = column.copy()
        else:
            column_copy = Column('deleted', table.c.id.type,
                                 default=default_deleted_value)
        columns.append(column_copy)

    def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted
        #                 column.
        if not isinstance(constraint, CheckConstraint):
            return False
        sqltext = str(constraint.sqltext)
        # NOTE(I159): when the type of column `deleted` is changed from
        # boolean to int, the corresponding CHECK constraint is dropped too.
        # But starting from SQLAlchemy version 0.8.3, those CHECK constraints
        # aren't dropped anymore. So despite the fact that column deleted is
        # of type int now, we still restrict its values to be either 0 or 1.
        constraint_markers = (
            "deleted in (0, 1)",
            "deleted IN (:deleted_1, :deleted_2)",
            "deleted IN (:param_1, :param_2)"
        )
        return any(sqltext.endswith(marker) for marker in constraint_markers)

    constraints = []
    for constraint in table.constraints:
        if not is_deleted_column_constraint(constraint):
            constraints.append(constraint.copy())

    new_table = Table(table_name + "__tmp__", meta,
                      *(columns + constraints))
    new_table.create()

    indexes = []
    for index in insp.get_indexes(table_name):
        column_names = [new_table.c[c] for c in index['column_names']]
        indexes.append(Index(index["name"],
                             *column_names,
                             unique=index["unique"]))

    ins = InsertFromSelect(new_table, table.select())
    migrate_engine.execute(ins)

    table.drop()
    [index.create(migrate_engine) for index in indexes]

    new_table.rename(table_name)
    new_table.update().\
        where(new_table.c.deleted == True).\
        values(deleted=new_table.c.id).\
        execute()

    # NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
    new_table.update().\
        where(new_table.c.deleted == False).\
        values(deleted=default_deleted_value).\
        execute()


def _index_exists(migrate_engine, table_name, index_name):
    inspector = reflection.Inspector.from_engine(migrate_engine)
    indexes = inspector.get_indexes(table_name)
    index_names = [index['name'] for index in indexes]

    return index_name in index_names


def _add_index(migrate_engine, table, index_name, idx_columns):
    index = Index(
        index_name, *[getattr(table.c, col) for col in idx_columns]
    )
    index.create()


def _drop_index(migrate_engine, table, index_name, idx_columns):
    if _index_exists(migrate_engine, table.name, index_name):
        index = Index(
            index_name, *[getattr(table.c, col) for col in idx_columns]
        )
        index.drop()


def _change_index_columns(migrate_engine, table, index_name,
                          new_columns, old_columns):
    _drop_index(migrate_engine, table, index_name, old_columns)
    _add_index(migrate_engine, table, index_name, new_columns)


def modify_indexes(migrate_engine, data, upgrade=True):
    if migrate_engine.name == 'sqlite':
        return

    meta = MetaData()
    meta.bind = migrate_engine

    for table_name, indexes in data.iteritems():
        table = Table(table_name, meta, autoload=True)

        for index_name, old_columns, new_columns in indexes:
            if not upgrade:
                new_columns, old_columns = old_columns, new_columns

            if migrate_engine.name == 'postgresql':
                if upgrade:
                    _add_index(migrate_engine, table, index_name, new_columns)
                else:
                    _drop_index(migrate_engine, table, index_name,
                                old_columns)
            elif migrate_engine.name == 'mysql':
                _change_index_columns(migrate_engine, table, index_name,
                                      new_columns, old_columns)
            else:
                raise ValueError('Unsupported DB %s' % migrate_engine.name)
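# NOTE(editor): hypothetical input layout for modify_indexes(), not taken
# from the original source. The data dict maps a table name to tuples of
# (index_name, old_columns, new_columns); with upgrade=False the tuples are
# applied in reverse:
#
#     INDEXES = {
#         'instances': [
#             ('instances_host_idx', ('host',), ('host', 'node')),
#         ],
#     }
#     modify_indexes(migrate_engine, INDEXES, upgrade=True)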
""" DB abstraction for Nova """ from nova.db.api import * # noqa nova-2014.1.5/nova/db/api.py0000664000567000056700000017647212540642543016550 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. Functions in this module are imported into the nova.db namespace. Call these functions from nova.db namespace, not the nova.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. """ from oslo.config import cfg from nova.cells import rpcapi as cells_rpcapi from nova.openstack.common.db import api as db_api from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging db_opts = [ cfg.BoolOpt('enable_new_services', default=True, help='Services to be added to the available pool on create'), cfg.StrOpt('instance_name_template', default='instance-%08x', help='Template string to be used to generate instance names'), cfg.StrOpt('snapshot_name_template', default='snapshot-%s', help='Template string to be used to generate snapshot names'), ] CONF = cfg.CONF CONF.register_opts(db_opts) CONF.import_opt('backend', 'nova.openstack.common.db.options', group='database') _BACKEND_MAPPING = {'sqlalchemy': 'nova.db.sqlalchemy.api'} IMPL = db_api.DBAPI(CONF.database.backend, backend_mapping=_BACKEND_MAPPING, lazy=True) LOG = logging.getLogger(__name__) # The maximum value a signed INT type may have MAX_INT = 0x7FFFFFFF ################### def constraint(**conditions): """Return a constraint object suitable for use with some updates.""" return IMPL.constraint(**conditions) def equal_any(*values): """Return an equality condition object suitable for use in a constraint. Equal_any conditions require that a model object's attribute equal any one of the given values. """ return IMPL.equal_any(*values) def not_equal(*values): """Return an inequality condition object suitable for use in a constraint. Not_equal conditions require that a model object's attribute differs from all of the given values. 
""" return IMPL.not_equal(*values) ################### def service_destroy(context, service_id): """Destroy the service or raise if it does not exist.""" return IMPL.service_destroy(context, service_id) def service_get(context, service_id): """Get a service or raise if it does not exist.""" return IMPL.service_get(context, service_id) def service_get_by_host_and_topic(context, host, topic): """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) def service_get_all(context, disabled=None): """Get all services.""" return IMPL.service_get_all(context, disabled) def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) def service_get_all_by_host(context, host): """Get all services for a given host.""" return IMPL.service_get_all_by_host(context, host) def service_get_by_compute_host(context, host): """Get the service entry for a given compute host. Returns the service entry joined with the compute_node entry. """ return IMPL.service_get_by_compute_host(context, host) def service_get_by_args(context, host, binary): """Get the state of a service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) def service_create(context, values): """Create a service from the values dictionary.""" return IMPL.service_create(context, values) def service_update(context, service_id, values): """Set the given properties on a service and update it. Raises NotFound if service does not exist. """ return IMPL.service_update(context, service_id, values) ################### def compute_node_get(context, compute_id): """Get a compute node by its id. :param context: The security context :param compute_id: ID of the compute node :returns: Dictionary-like object containing properties of the compute node, including its corresponding service Raises ComputeHostNotFound if compute node with the given ID doesn't exist. """ return IMPL.compute_node_get(context, compute_id) def compute_node_get_by_service_id(context, service_id): """Get a compute node by its associated service id. :param context: The security context :param compute_id: ID of the associated service :returns: Dictionary-like object containing properties of the compute node, including its corresponding service and statistics Raises ServiceNotFound if service with the given ID doesn't exist. """ return IMPL.compute_node_get_by_service_id(context, service_id) def compute_node_get_all(context, no_date_fields=False): """Get all computeNodes. :param context: The security context :param no_date_fields: If set to True, excludes 'created_at', 'updated_at', 'deleted_at' and 'deleted' fields from the output, thus significantly reducing its size. Set to False by default :returns: List of dictionaries each containing compute node properties, including corresponding service """ return IMPL.compute_node_get_all(context, no_date_fields) def compute_node_search_by_hypervisor(context, hypervisor_match): """Get compute nodes by hypervisor hostname. :param context: The security context :param hypervisor_match: The hypervisor hostname :returns: List of dictionary-like objects each containing compute node properties, including corresponding service """ return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match) def compute_node_create(context, values): """Create a compute node from the values dictionary. 
def compute_node_create(context, values):
    """Create a compute node from the values dictionary.

    :param context: The security context
    :param values: Dictionary containing compute node properties

    :returns: Dictionary-like object containing the properties of the created
              node, including its corresponding service and statistics
    """
    return IMPL.compute_node_create(context, values)


def compute_node_update(context, compute_id, values):
    """Set the given properties on a compute node and update it.

    :param context: The security context
    :param compute_id: ID of the compute node
    :param values: Dictionary containing compute node properties to be
                   updated

    :returns: Dictionary-like object containing the properties of the updated
              compute node, including its corresponding service and
              statistics

    Raises ComputeHostNotFound if compute node with the given ID doesn't
    exist.
    """
    return IMPL.compute_node_update(context, compute_id, values)


def compute_node_delete(context, compute_id):
    """Delete a compute node from the database.

    :param context: The security context
    :param compute_id: ID of the compute node

    Raises ComputeHostNotFound if compute node with the given ID doesn't
    exist.
    """
    return IMPL.compute_node_delete(context, compute_id)


def compute_node_statistics(context):
    """Get aggregate statistics over all compute nodes.

    :param context: The security context

    :returns: Dictionary containing compute node characteristics summed up
              over all the compute nodes, e.g. 'vcpus', 'free_ram_mb' etc.
    """
    return IMPL.compute_node_statistics(context)


###################


def certificate_create(context, values):
    """Create a certificate from the values dictionary."""
    return IMPL.certificate_create(context, values)


def certificate_get_all_by_project(context, project_id):
    """Get all certificates for a project."""
    return IMPL.certificate_get_all_by_project(context, project_id)


def certificate_get_all_by_user(context, user_id):
    """Get all certificates for a user."""
    return IMPL.certificate_get_all_by_user(context, user_id)


def certificate_get_all_by_user_and_project(context, user_id, project_id):
    """Get all certificates for a user and project."""
    return IMPL.certificate_get_all_by_user_and_project(context,
                                                        user_id,
                                                        project_id)


###################


def floating_ip_get(context, id):
    return IMPL.floating_ip_get(context, id)


def floating_ip_get_pools(context):
    """Returns a list of floating ip pools."""
    return IMPL.floating_ip_get_pools(context)


def floating_ip_allocate_address(context, project_id, pool,
                                 auto_assigned=False):
    """Allocate free floating ip from specified pool and return the address.

    Raises if one is not available.
    """
    return IMPL.floating_ip_allocate_address(context, project_id, pool,
                                             auto_assigned)


def floating_ip_bulk_create(context, ips):
    """Create a lot of floating ips from the values dictionary."""
    return IMPL.floating_ip_bulk_create(context, ips)


def floating_ip_bulk_destroy(context, ips):
    """Destroy a lot of floating ips from the values dictionary."""
    return IMPL.floating_ip_bulk_destroy(context, ips)


def floating_ip_create(context, values):
    """Create a floating ip from the values dictionary."""
    return IMPL.floating_ip_create(context, values)


def floating_ip_deallocate(context, address):
    """Deallocate a floating ip by address."""
    return IMPL.floating_ip_deallocate(context, address)


def floating_ip_destroy(context, address):
    """Destroy the floating_ip or raise if it does not exist."""
    return IMPL.floating_ip_destroy(context, address)
def floating_ip_disassociate(context, address):
    """Disassociate a floating ip from a fixed ip by address.

    :returns: the fixed ip record joined to network record or None
              if the ip was not associated to an ip.
    """
    return IMPL.floating_ip_disassociate(context, address)


def floating_ip_fixed_ip_associate(context, floating_address,
                                   fixed_address, host):
    """Associate a floating ip to a fixed_ip by address.

    :returns: the fixed ip record joined to network record or None
              if the ip was already associated to the fixed ip.
    """
    return IMPL.floating_ip_fixed_ip_associate(context,
                                               floating_address,
                                               fixed_address,
                                               host)


def floating_ip_get_all(context):
    """Get all floating ips."""
    return IMPL.floating_ip_get_all(context)


def floating_ip_get_all_by_host(context, host):
    """Get all floating ips by host."""
    return IMPL.floating_ip_get_all_by_host(context, host)


def floating_ip_get_all_by_project(context, project_id):
    """Get all floating ips by project."""
    return IMPL.floating_ip_get_all_by_project(context, project_id)


def floating_ip_get_by_address(context, address):
    """Get a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_get_by_address(context, address)


def floating_ip_get_by_fixed_address(context, fixed_address):
    """Get floating ips by fixed address."""
    return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)


def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
    """Get floating ips by fixed ip id."""
    return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)


def floating_ip_update(context, address, values):
    """Update a floating ip by address or raise if it doesn't exist."""
    return IMPL.floating_ip_update(context, address, values)


def floating_ip_set_auto_assigned(context, address):
    """Set auto_assigned flag to floating ip."""
    return IMPL.floating_ip_set_auto_assigned(context, address)


def dnsdomain_list(context):
    """Get a list of all zones in our database, public and private."""
    return IMPL.dnsdomain_list(context)


def dnsdomain_get_all(context):
    """Get a list of all dnsdomains in our database."""
    return IMPL.dnsdomain_get_all(context)


def dnsdomain_register_for_zone(context, fqdomain, zone):
    """Associate a DNS domain with an availability zone."""
    return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)


def dnsdomain_register_for_project(context, fqdomain, project):
    """Associate a DNS domain with a project id."""
    return IMPL.dnsdomain_register_for_project(context, fqdomain, project)


def dnsdomain_unregister(context, fqdomain):
    """Purge associations for the specified DNS zone."""
    return IMPL.dnsdomain_unregister(context, fqdomain)


def dnsdomain_get(context, fqdomain):
    """Get the db record for the specified domain."""
    return IMPL.dnsdomain_get(context, fqdomain)
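# NOTE(editor): usage sketch (not from the original source); the addresses
# and host are invented. Association returns the fixed ip record joined to
# its network, or None when the pair was already associated:
#
#     fixed = floating_ip_fixed_ip_associate(ctxt, '198.51.100.10',
#                                            '10.0.0.5', 'compute-01')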
""" return IMPL.migration_get_unconfirmed_by_dest_compute(context, confirm_window, dest_compute, use_slave=use_slave) def migration_get_in_progress_by_host_and_node(context, host, node): """Finds all migrations for the given host + node that are not yet confirmed or reverted. """ return IMPL.migration_get_in_progress_by_host_and_node(context, host, node) def migration_get_all_by_filters(context, filters): """Finds all migrations in progress.""" return IMPL.migration_get_all_by_filters(context, filters) #################### def fixed_ip_associate(context, address, instance_uuid, network_id=None, reserved=False): """Associate fixed ip to instance. Raises if fixed ip is not available. """ return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id, reserved) def fixed_ip_associate_pool(context, network_id, instance_uuid=None, host=None): """Find free ip in network and associate it to instance or host. Raises if one is not available. """ return IMPL.fixed_ip_associate_pool(context, network_id, instance_uuid, host) def fixed_ip_create(context, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_create(context, values) def fixed_ip_bulk_create(context, ips): """Create a lot of fixed ips from the values dictionary.""" return IMPL.fixed_ip_bulk_create(context, ips) def fixed_ip_disassociate(context, address): """Disassociate a fixed ip from an instance by address.""" return IMPL.fixed_ip_disassociate(context, address) def fixed_ip_disassociate_all_by_timeout(context, host, time): """Disassociate old fixed ips from host.""" return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time) def fixed_ip_get(context, id, get_network=False): """Get fixed ip by id or raise if it does not exist. If get_network is true, also return the associated network. 
""" return IMPL.fixed_ip_get(context, id, get_network) def fixed_ip_get_all(context): """Get all defined fixed ips.""" return IMPL.fixed_ip_get_all(context) def fixed_ip_get_by_address(context, address, columns_to_join=None): """Get a fixed ip by address or raise if it does not exist.""" return IMPL.fixed_ip_get_by_address(context, address, columns_to_join=columns_to_join) def fixed_ip_get_by_address_detailed(context, address): """Get detailed fixed ip info by address or raise if it does not exist.""" return IMPL.fixed_ip_get_by_address_detailed(context, address) def fixed_ip_get_by_floating_address(context, floating_address): """Get a fixed ip by a floating address.""" return IMPL.fixed_ip_get_by_floating_address(context, floating_address) def fixed_ip_get_by_instance(context, instance_uuid): """Get fixed ips by instance or raise if none exist.""" return IMPL.fixed_ip_get_by_instance(context, instance_uuid) def fixed_ip_get_by_host(context, host): """Get fixed ips by compute host.""" return IMPL.fixed_ip_get_by_host(context, host) def fixed_ip_get_by_network_host(context, network_uuid, host): """Get fixed ip for a host in a network.""" return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host) def fixed_ips_by_virtual_interface(context, vif_id): """Get fixed ips by virtual interface or raise if none exist.""" return IMPL.fixed_ips_by_virtual_interface(context, vif_id) def fixed_ip_update(context, address, values): """Create a fixed ip from the values dictionary.""" return IMPL.fixed_ip_update(context, address, values) #################### def virtual_interface_create(context, values): """Create a virtual interface record in the database.""" return IMPL.virtual_interface_create(context, values) def virtual_interface_get(context, vif_id): """Gets a virtual interface from the table.""" return IMPL.virtual_interface_get(context, vif_id) def virtual_interface_get_by_address(context, address): """Gets a virtual interface from the table filtering on address.""" return IMPL.virtual_interface_get_by_address(context, address) def virtual_interface_get_by_uuid(context, vif_uuid): """Gets a virtual interface from the table filtering on vif uuid.""" return IMPL.virtual_interface_get_by_uuid(context, vif_uuid) def virtual_interface_get_by_instance(context, instance_id, use_slave=False): """Gets all virtual_interfaces for instance.""" return IMPL.virtual_interface_get_by_instance(context, instance_id, use_slave=use_slave) def virtual_interface_get_by_instance_and_network(context, instance_id, network_id): """Gets all virtual interfaces for instance.""" return IMPL.virtual_interface_get_by_instance_and_network(context, instance_id, network_id) def virtual_interface_delete_by_instance(context, instance_id): """Delete virtual interface records associated with instance.""" return IMPL.virtual_interface_delete_by_instance(context, instance_id) def virtual_interface_get_all(context): """Gets all virtual interfaces from the table.""" return IMPL.virtual_interface_get_all(context) #################### def instance_create(context, values): """Create an instance from the values dictionary.""" return IMPL.instance_create(context, values) def instance_destroy(context, instance_uuid, constraint=None, update_cells=True): """Destroy the instance or raise if it does not exist.""" rv = IMPL.instance_destroy(context, instance_uuid, constraint) if update_cells: try: cells_rpcapi.CellsAPI().instance_destroy_at_top(context, rv) except Exception: LOG.exception(_("Failed to notify cells of instance destroy")) 
def instance_create(context, values):
    """Create an instance from the values dictionary."""
    return IMPL.instance_create(context, values)


def instance_destroy(context, instance_uuid, constraint=None,
                     update_cells=True):
    """Destroy the instance or raise if it does not exist."""
    rv = IMPL.instance_destroy(context, instance_uuid, constraint)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().instance_destroy_at_top(context, rv)
        except Exception:
            LOG.exception(_("Failed to notify cells of instance destroy"))
    return rv


def instance_get_by_uuid(context, uuid, columns_to_join=None,
                         use_slave=False):
    """Get an instance or raise if it does not exist."""
    return IMPL.instance_get_by_uuid(context, uuid,
                                     columns_to_join,
                                     use_slave=use_slave)


def instance_get(context, instance_id, columns_to_join=None):
    """Get an instance or raise if it does not exist."""
    return IMPL.instance_get(context, instance_id,
                             columns_to_join=columns_to_join)


def instance_get_all(context, columns_to_join=None):
    """Get all instances."""
    return IMPL.instance_get_all(context, columns_to_join=columns_to_join)


def instance_get_all_by_filters(context, filters, sort_key='created_at',
                                sort_dir='desc', limit=None, marker=None,
                                columns_to_join=None, use_slave=False):
    """Get all instances that match all filters."""
    return IMPL.instance_get_all_by_filters(context, filters, sort_key,
                                            sort_dir, limit=limit,
                                            marker=marker,
                                            columns_to_join=columns_to_join,
                                            use_slave=use_slave)


def instance_get_active_by_window_joined(context, begin, end=None,
                                         project_id=None, host=None):
    """Get instances and joins active during a certain time window.

    Specifying a project_id will filter for a certain project.
    Specifying a host will filter for instances on a given compute host.
    """
    return IMPL.instance_get_active_by_window_joined(context, begin, end,
                                                     project_id, host)


def instance_get_all_by_host(context, host, columns_to_join=None,
                             use_slave=False):
    """Get all instances belonging to a host."""
    return IMPL.instance_get_all_by_host(context, host,
                                         columns_to_join,
                                         use_slave=use_slave)


def instance_get_all_by_host_and_node(context, host, node):
    """Get all instances belonging to a node."""
    return IMPL.instance_get_all_by_host_and_node(context, host, node)


def instance_get_all_by_host_and_not_type(context, host, type_id=None):
    """Get all instances belonging to a host with a different type_id."""
    return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)


def instance_get_floating_address(context, instance_id):
    """Get the first floating ip address of an instance."""
    return IMPL.instance_get_floating_address(context, instance_id)


def instance_floating_address_get_all(context, instance_uuid):
    """Get all floating ip addresses of an instance."""
    return IMPL.instance_floating_address_get_all(context, instance_uuid)


# NOTE(hanlind): This method can be removed as conductor RPC API moves to
# v2.0.
def instance_get_all_hung_in_rebooting(context, reboot_window):
    """Get all instances stuck in a rebooting state."""
    return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)


def instance_update(context, instance_uuid, values, update_cells=True):
    """Set the given properties on an instance and update it.

    Raises NotFound if instance does not exist.
    """
    rv = IMPL.instance_update(context, instance_uuid, values)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().instance_update_at_top(context, rv)
        except Exception:
            LOG.exception(_("Failed to notify cells of instance update"))
    return rv
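# NOTE(editor): hedged usage sketch, not in the original file; the uuid is
# made up. Passing update_cells=False skips the best-effort nova-cells
# notification that instance_update() otherwise performs after the DB write:
#
#     instance_update(ctxt, 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
#                     {'vm_state': 'active'}, update_cells=False)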
# FIXME(comstud): 'update_cells' is temporary as we transition to using
# objects.  When everything is using Instance.save(), we can remove the
# argument and the RPC to nova-cells.
def instance_update_and_get_original(context, instance_uuid, values,
                                     update_cells=True,
                                     columns_to_join=None):
    """Set the given properties on an instance and update it.

    Return a shallow copy of the original instance reference, as well as the
    updated one.

    :param context: request context object
    :param instance_uuid: instance uuid
    :param values: dict containing column values

    :returns: a tuple of the form (old_instance_ref, new_instance_ref)

    Raises NotFound if instance does not exist.
    """
    rv = IMPL.instance_update_and_get_original(context, instance_uuid, values,
                                               columns_to_join=columns_to_join)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().instance_update_at_top(context, rv[1])
        except Exception:
            LOG.exception(_("Failed to notify cells of instance update"))
    return rv


def instance_add_security_group(context, instance_id, security_group_id):
    """Associate the given security group with the given instance."""
    return IMPL.instance_add_security_group(context, instance_id,
                                            security_group_id)


def instance_remove_security_group(context, instance_id, security_group_id):
    """Disassociate the given security group from the given instance."""
    return IMPL.instance_remove_security_group(context, instance_id,
                                               security_group_id)


####################


def instance_group_create(context, values, policies=None, metadata=None,
                          members=None):
    """Create a new group with metadata.

    Each group will receive a unique uuid. This will be used for access to
    the group.
    """
    return IMPL.instance_group_create(context, values, policies, metadata,
                                      members)


def instance_group_get(context, group_uuid):
    """Get a specific group by id."""
    return IMPL.instance_group_get(context, group_uuid)


def instance_group_update(context, group_uuid, values):
    """Update the attributes of a group."""
    return IMPL.instance_group_update(context, group_uuid, values)


def instance_group_delete(context, group_uuid):
    """Delete a group."""
    return IMPL.instance_group_delete(context, group_uuid)


def instance_group_get_all(context):
    """Get all groups."""
    return IMPL.instance_group_get_all(context)


def instance_group_get_all_by_project_id(context, project_id):
    """Get all groups for a specific project_id."""
    return IMPL.instance_group_get_all_by_project_id(context, project_id)


def instance_group_metadata_add(context, group_uuid, metadata,
                                set_delete=False):
    """Add metadata to the group."""
    return IMPL.instance_group_metadata_add(context, group_uuid, metadata,
                                            set_delete)


def instance_group_metadata_delete(context, group_uuid, key):
    """Delete metadata from the group."""
    return IMPL.instance_group_metadata_delete(context, group_uuid, key)


def instance_group_metadata_get(context, group_uuid):
    """Get the metadata from the group."""
    return IMPL.instance_group_metadata_get(context, group_uuid)


def instance_group_members_add(context, group_uuid, members,
                               set_delete=False):
    """Add members to the group."""
    return IMPL.instance_group_members_add(context, group_uuid, members,
                                           set_delete=set_delete)


def instance_group_member_delete(context, group_uuid, instance_id):
    """Delete a specific member from the group."""
    return IMPL.instance_group_member_delete(context, group_uuid, instance_id)


def instance_group_members_get(context, group_uuid):
    """Get the members from the group."""
    return IMPL.instance_group_members_get(context, group_uuid)


def instance_group_policies_add(context, group_uuid, policies,
                                set_delete=False):
    """Add policies to the group."""
    return IMPL.instance_group_policies_add(context, group_uuid, policies,
                                            set_delete=set_delete)


def instance_group_policy_delete(context, group_uuid, policy):
    """Delete a specific policy from the group."""
    return IMPL.instance_group_policy_delete(context, group_uuid, policy)


def instance_group_policies_get(context, group_uuid):
    """Get the policies from the group."""
    return IMPL.instance_group_policies_get(context, group_uuid)
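# NOTE(editor): illustrative lifecycle sketch for the instance-group helpers
# above; not part of the original source, and the name/policy/member values
# are invented.
#
#     group = instance_group_create(ctxt, {'name': 'af-group'},
#                                   policies=['anti-affinity'])
#     instance_group_members_add(ctxt, group['uuid'],
#                                ['instance-1', 'instance-2'],
#                                set_delete=True)
#     members = instance_group_members_get(ctxt, group['uuid'])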
###################


def instance_info_cache_get(context, instance_uuid):
    """Gets an instance info cache from the table.

    :param instance_uuid: = uuid of the info cache's instance
    """
    return IMPL.instance_info_cache_get(context, instance_uuid)


def instance_info_cache_update(context, instance_uuid, values):
    """Update an instance info cache record in the table.

    :param instance_uuid: = uuid of info cache's instance
    :param values: = dict containing column values to update
    """
    return IMPL.instance_info_cache_update(context, instance_uuid, values)


def instance_info_cache_delete(context, instance_uuid):
    """Deletes an existing instance_info_cache record

    :param instance_uuid: = uuid of the instance tied to the cache record
    """
    return IMPL.instance_info_cache_delete(context, instance_uuid)


###################


def key_pair_create(context, values):
    """Create a key_pair from the values dictionary."""
    return IMPL.key_pair_create(context, values)


def key_pair_destroy(context, user_id, name):
    """Destroy the key_pair or raise if it does not exist."""
    return IMPL.key_pair_destroy(context, user_id, name)


def key_pair_get(context, user_id, name):
    """Get a key_pair or raise if it does not exist."""
    return IMPL.key_pair_get(context, user_id, name)


def key_pair_get_all_by_user(context, user_id):
    """Get all key_pairs by user."""
    return IMPL.key_pair_get_all_by_user(context, user_id)


def key_pair_count_by_user(context, user_id):
    """Count number of key pairs for the given user ID."""
    return IMPL.key_pair_count_by_user(context, user_id)


####################


def network_associate(context, project_id, network_id=None, force=False):
    """Associate a free network to a project."""
    return IMPL.network_associate(context, project_id, network_id, force)


def network_count_reserved_ips(context, network_id):
    """Return the number of reserved ips in the network."""
    return IMPL.network_count_reserved_ips(context, network_id)


def network_create_safe(context, values):
    """Create a network from the values dict.

    The network is only returned if the create succeeds. If the create
    violates constraints because the network already exists, no exception is
    raised.
    """
    return IMPL.network_create_safe(context, values)


def network_delete_safe(context, network_id):
    """Delete network with key network_id.

    This method assumes that the network is not associated with any project
    """
    return IMPL.network_delete_safe(context, network_id)


def network_disassociate(context, network_id, disassociate_host=True,
                         disassociate_project=True):
    """Disassociate the network from project or host

    Raises if it does not exist.
""" return IMPL.network_disassociate(context, network_id, disassociate_host, disassociate_project) def network_get(context, network_id, project_only="allow_none"): """Get a network or raise if it does not exist.""" return IMPL.network_get(context, network_id, project_only=project_only) def network_get_all(context, project_only="allow_none"): """Return all defined networks.""" return IMPL.network_get_all(context, project_only) def network_get_all_by_uuids(context, network_uuids, project_only="allow_none"): """Return networks by ids.""" return IMPL.network_get_all_by_uuids(context, network_uuids, project_only=project_only) # pylint: disable=C0103 def network_in_use_on_host(context, network_id, host=None): """Indicates if a network is currently in use on host.""" return IMPL.network_in_use_on_host(context, network_id, host) def network_get_associated_fixed_ips(context, network_id, host=None): """Get all network's ips that have been associated.""" return IMPL.network_get_associated_fixed_ips(context, network_id, host) def network_get_by_uuid(context, uuid): """Get a network by uuid or raise if it does not exist.""" return IMPL.network_get_by_uuid(context, uuid) def network_get_by_cidr(context, cidr): """Get a network by cidr or raise if it does not exist.""" return IMPL.network_get_by_cidr(context, cidr) def network_get_all_by_host(context, host): """All networks for which the given host is the network host.""" return IMPL.network_get_all_by_host(context, host) def network_set_host(context, network_id, host_id): """Safely set the host for network.""" return IMPL.network_set_host(context, network_id, host_id) def network_update(context, network_id, values): """Set the given properties on a network and update it. Raises NotFound if network does not exist. 
""" return IMPL.network_update(context, network_id, values) ############### def quota_create(context, project_id, resource, limit, user_id=None): """Create a quota for the given project and resource.""" return IMPL.quota_create(context, project_id, resource, limit, user_id=user_id) def quota_get(context, project_id, resource, user_id=None): """Retrieve a quota or raise if it does not exist.""" return IMPL.quota_get(context, project_id, resource, user_id=user_id) def quota_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all quotas associated with a given project and user.""" return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id) def quota_get_all_by_project(context, project_id): """Retrieve all quotas associated with a given project.""" return IMPL.quota_get_all_by_project(context, project_id) def quota_get_all(context, project_id): """Retrieve all user quotas associated with a given project.""" return IMPL.quota_get_all(context, project_id) def quota_update(context, project_id, resource, limit, user_id=None): """Update a quota or raise if it does not exist.""" return IMPL.quota_update(context, project_id, resource, limit, user_id=user_id) ################### def quota_class_create(context, class_name, resource, limit): """Create a quota class for the given name and resource.""" return IMPL.quota_class_create(context, class_name, resource, limit) def quota_class_get(context, class_name, resource): """Retrieve a quota class or raise if it does not exist.""" return IMPL.quota_class_get(context, class_name, resource) def quota_class_get_default(context): """Retrieve all default quotas.""" return IMPL.quota_class_get_default(context) def quota_class_get_all_by_name(context, class_name): """Retrieve all quotas associated with a given quota class.""" return IMPL.quota_class_get_all_by_name(context, class_name) def quota_class_update(context, class_name, resource, limit): """Update a quota class or raise if it does not exist.""" return IMPL.quota_class_update(context, class_name, resource, limit) ################### def quota_usage_get(context, project_id, resource, user_id=None): """Retrieve a quota usage or raise if it does not exist.""" return IMPL.quota_usage_get(context, project_id, resource, user_id=user_id) def quota_usage_get_all_by_project_and_user(context, project_id, user_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project_and_user(context, project_id, user_id) def quota_usage_get_all_by_project(context, project_id): """Retrieve all usage associated with a given resource.""" return IMPL.quota_usage_get_all_by_project(context, project_id) def quota_usage_update(context, project_id, user_id, resource, **kwargs): """Update a quota usage or raise if it does not exist.""" return IMPL.quota_usage_update(context, project_id, user_id, resource, **kwargs) ################### def quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=None, user_id=None): """Check quotas and create appropriate reservations.""" return IMPL.quota_reserve(context, resources, quotas, user_quotas, deltas, expire, until_refresh, max_age, project_id=project_id, user_id=user_id) def reservation_commit(context, reservations, project_id=None, user_id=None): """Commit quota reservations.""" return IMPL.reservation_commit(context, reservations, project_id=project_id, user_id=user_id) def reservation_rollback(context, reservations, project_id=None, user_id=None): """Roll back 
quota reservations.""" return IMPL.reservation_rollback(context, reservations, project_id=project_id, user_id=user_id) def quota_destroy_all_by_project_and_user(context, project_id, user_id): """Destroy all quotas associated with a given project and user.""" return IMPL.quota_destroy_all_by_project_and_user(context, project_id, user_id) def quota_destroy_all_by_project(context, project_id): """Destroy all quotas associated with a given project.""" return IMPL.quota_destroy_all_by_project(context, project_id) def reservation_expire(context): """Roll back any expired reservations.""" return IMPL.reservation_expire(context) ################### def get_ec2_volume_id_by_uuid(context, volume_id): return IMPL.get_ec2_volume_id_by_uuid(context, volume_id) def get_volume_uuid_by_ec2_id(context, ec2_id): return IMPL.get_volume_uuid_by_ec2_id(context, ec2_id) def ec2_volume_create(context, volume_id, forced_id=None): return IMPL.ec2_volume_create(context, volume_id, forced_id) def get_snapshot_uuid_by_ec2_id(context, ec2_id): return IMPL.get_snapshot_uuid_by_ec2_id(context, ec2_id) def get_ec2_snapshot_id_by_uuid(context, snapshot_id): return IMPL.get_ec2_snapshot_id_by_uuid(context, snapshot_id) def ec2_snapshot_create(context, snapshot_id, forced_id=None): return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id) #################### def block_device_mapping_create(context, values, legacy=True): """Create an entry of block device mapping.""" return IMPL.block_device_mapping_create(context, values, legacy) def block_device_mapping_update(context, bdm_id, values, legacy=True): """Update an entry of block device mapping.""" return IMPL.block_device_mapping_update(context, bdm_id, values, legacy) def block_device_mapping_update_or_create(context, values, legacy=True): """Update an entry of block device mapping. 
If one does not exist, create a new entry.
    """
    return IMPL.block_device_mapping_update_or_create(context, values, legacy)


def block_device_mapping_get_all_by_instance(context, instance_uuid,
                                             use_slave=False):
    """Get all block device mappings belonging to an instance."""
    return IMPL.block_device_mapping_get_all_by_instance(context,
                                                         instance_uuid,
                                                         use_slave)


def block_device_mapping_get_by_volume_id(context, volume_id,
                                          columns_to_join=None):
    """Get block device mapping for a given volume."""
    return IMPL.block_device_mapping_get_by_volume_id(context, volume_id,
                                                      columns_to_join)


def block_device_mapping_destroy(context, bdm_id):
    """Destroy the block device mapping."""
    return IMPL.block_device_mapping_destroy(context, bdm_id)


def block_device_mapping_destroy_by_instance_and_device(context,
                                                        instance_uuid,
                                                        device_name):
    """Destroy the block device mapping."""
    return IMPL.block_device_mapping_destroy_by_instance_and_device(
        context, instance_uuid, device_name)


def block_device_mapping_destroy_by_instance_and_volume(context,
                                                        instance_uuid,
                                                        volume_id):
    """Destroy the block device mapping."""
    return IMPL.block_device_mapping_destroy_by_instance_and_volume(
        context, instance_uuid, volume_id)


####################


def security_group_get_all(context):
    """Get all security groups."""
    return IMPL.security_group_get_all(context)


def security_group_get(context, security_group_id, columns_to_join=None):
    """Get security group by its id."""
    return IMPL.security_group_get(context, security_group_id,
                                   columns_to_join)


def security_group_get_by_name(context, project_id, group_name,
                               columns_to_join=None):
    """Returns a security group with the specified name from a project."""
    return IMPL.security_group_get_by_name(context, project_id, group_name,
                                           columns_to_join=columns_to_join)


def security_group_get_by_project(context, project_id):
    """Get all security groups belonging to a project."""
    return IMPL.security_group_get_by_project(context, project_id)


def security_group_get_by_instance(context, instance_uuid):
    """Get security groups to which the instance is assigned."""
    return IMPL.security_group_get_by_instance(context, instance_uuid)


def security_group_in_use(context, group_id):
    """Indicates if a security group is currently in use."""
    return IMPL.security_group_in_use(context, group_id)


def security_group_create(context, values):
    """Create a new security group."""
    return IMPL.security_group_create(context, values)


def security_group_update(context, security_group_id, values,
                          columns_to_join=None):
    """Update a security group."""
    return IMPL.security_group_update(context, security_group_id, values,
                                      columns_to_join=columns_to_join)


def security_group_ensure_default(context):
    """Ensure default security group exists for a project_id.

    Returns a tuple with the first element being a bool indicating
    if the default security group previously existed. Second
    element is the dict used to create the default security group.
    """
    return IMPL.security_group_ensure_default(context)
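

# NOTE(editor): illustrative sketch only, not part of the public DB API.
# It shows how the IMPL dispatch wrappers above are typically combined;
# 'ctxt' is assumed to be a nova RequestContext scoped to a tenant, and
# the 'name' key is an assumption about the returned values dict.
def _example_ensure_and_fetch_default_group(ctxt):
    """Sketch: ensure the default group exists, then fetch it by name.

    security_group_ensure_default() returns (existed, values) where
    'existed' reports whether the group was already present and 'values'
    is the dict used to create it.
    """
    existed, values = security_group_ensure_default(ctxt)
    return security_group_get_by_name(ctxt, ctxt.project_id,
                                      values.get('name', 'default'))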
""" return IMPL.security_group_ensure_default(context) def security_group_destroy(context, security_group_id): """Deletes a security group.""" return IMPL.security_group_destroy(context, security_group_id) #################### def security_group_rule_create(context, values): """Create a new security group.""" return IMPL.security_group_rule_create(context, values) def security_group_rule_get_by_security_group(context, security_group_id, columns_to_join=None): """Get all rules for a given security group.""" return IMPL.security_group_rule_get_by_security_group( context, security_group_id, columns_to_join=columns_to_join) def security_group_rule_get_by_security_group_grantee(context, security_group_id): """Get all rules that grant access to the given security group.""" return IMPL.security_group_rule_get_by_security_group_grantee(context, security_group_id) def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) def security_group_rule_get(context, security_group_rule_id): """Gets a security group rule.""" return IMPL.security_group_rule_get(context, security_group_rule_id) def security_group_rule_count_by_group(context, security_group_id): """Count rules in a given security group.""" return IMPL.security_group_rule_count_by_group(context, security_group_id) ################### def security_group_default_rule_get(context, security_group_rule_default_id): return IMPL.security_group_default_rule_get(context, security_group_rule_default_id) def security_group_default_rule_destroy(context, security_group_rule_default_id): return IMPL.security_group_default_rule_destroy( context, security_group_rule_default_id) def security_group_default_rule_create(context, values): return IMPL.security_group_default_rule_create(context, values) def security_group_default_rule_list(context): return IMPL.security_group_default_rule_list(context) ################### def provider_fw_rule_create(context, rule): """Add a firewall rule at the provider level (all hosts & instances).""" return IMPL.provider_fw_rule_create(context, rule) def provider_fw_rule_get_all(context): """Get all provider-level firewall rules.""" return IMPL.provider_fw_rule_get_all(context) def provider_fw_rule_destroy(context, rule_id): """Delete a provider firewall rule from the database.""" return IMPL.provider_fw_rule_destroy(context, rule_id) ################### def project_get_networks(context, project_id, associate=True): """Return the network associated with the project. If associate is true, it will attempt to associate a new network if one is not found, otherwise it returns None. 
""" return IMPL.project_get_networks(context, project_id, associate) ################### def console_pool_create(context, values): """Create console pool.""" return IMPL.console_pool_create(context, values) def console_pool_get_by_host_type(context, compute_host, proxy_host, console_type): """Fetch a console pool for a given proxy host, compute host, and type.""" return IMPL.console_pool_get_by_host_type(context, compute_host, proxy_host, console_type) def console_pool_get_all_by_host_type(context, host, console_type): """Fetch all pools for given proxy host and type.""" return IMPL.console_pool_get_all_by_host_type(context, host, console_type) def console_create(context, values): """Create a console.""" return IMPL.console_create(context, values) def console_delete(context, console_id): """Delete a console.""" return IMPL.console_delete(context, console_id) def console_get_by_pool_instance(context, pool_id, instance_uuid): """Get console entry for a given instance and pool.""" return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid) def console_get_all_by_instance(context, instance_uuid, columns_to_join=None): """Get consoles for a given instance.""" return IMPL.console_get_all_by_instance(context, instance_uuid, columns_to_join) def console_get(context, console_id, instance_uuid=None): """Get a specific console (possibly on a given instance).""" return IMPL.console_get(context, console_id, instance_uuid) ################## def flavor_create(context, values, projects=None): """Create a new instance type.""" return IMPL.flavor_create(context, values, projects=projects) def flavor_get_all(context, inactive=False, filters=None, sort_key='flavorid', sort_dir='asc', limit=None, marker=None): """Get all instance flavors.""" return IMPL.flavor_get_all( context, inactive=inactive, filters=filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker) def flavor_get(context, id): """Get instance type by id.""" return IMPL.flavor_get(context, id) def flavor_get_by_name(context, name): """Get instance type by name.""" return IMPL.flavor_get_by_name(context, name) def flavor_get_by_flavor_id(context, id, read_deleted=None): """Get instance type by flavor id.""" return IMPL.flavor_get_by_flavor_id(context, id, read_deleted) def flavor_destroy(context, name): """Delete an instance type.""" return IMPL.flavor_destroy(context, name) def flavor_access_get_by_flavor_id(context, flavor_id): """Get flavor access by flavor id.""" return IMPL.flavor_access_get_by_flavor_id(context, flavor_id) def flavor_access_add(context, flavor_id, project_id): """Add flavor access for project.""" return IMPL.flavor_access_add(context, flavor_id, project_id) def flavor_access_remove(context, flavor_id, project_id): """Remove flavor access for project.""" return IMPL.flavor_access_remove(context, flavor_id, project_id) def flavor_extra_specs_get(context, flavor_id): """Get all extra specs for an instance type.""" return IMPL.flavor_extra_specs_get(context, flavor_id) def flavor_extra_specs_get_item(context, flavor_id, key): """Get extra specs by key and flavor_id.""" return IMPL.flavor_extra_specs_get_item(context, flavor_id, key) def flavor_extra_specs_delete(context, flavor_id, key): """Delete the given extra specs item.""" IMPL.flavor_extra_specs_delete(context, flavor_id, key) def flavor_extra_specs_update_or_create(context, flavor_id, extra_specs): """Create or update instance type extra specs. 
    This adds or modifies the key/value pairs specified in the
    extra specs dict argument.
    """
    IMPL.flavor_extra_specs_update_or_create(context, flavor_id, extra_specs)


####################


def pci_device_get_by_addr(context, node_id, dev_addr):
    """Get PCI device by address."""
    return IMPL.pci_device_get_by_addr(context, node_id, dev_addr)


def pci_device_get_by_id(context, id):
    """Get PCI device by id."""
    return IMPL.pci_device_get_by_id(context, id)


def pci_device_get_all_by_node(context, node_id):
    """Get all PCI devices for one host."""
    return IMPL.pci_device_get_all_by_node(context, node_id)


def pci_device_get_all_by_instance_uuid(context, instance_uuid):
    """Get PCI devices allocated to instance."""
    return IMPL.pci_device_get_all_by_instance_uuid(context, instance_uuid)


def pci_device_destroy(context, node_id, address):
    """Delete a PCI device record."""
    return IMPL.pci_device_destroy(context, node_id, address)


def pci_device_update(context, node_id, address, value):
    """Update a pci device."""
    return IMPL.pci_device_update(context, node_id, address, value)


###################


def cell_create(context, values):
    """Create a new child Cell entry."""
    return IMPL.cell_create(context, values)


def cell_update(context, cell_name, values):
    """Update a child Cell entry."""
    return IMPL.cell_update(context, cell_name, values)


def cell_delete(context, cell_name):
    """Delete a child Cell."""
    return IMPL.cell_delete(context, cell_name)


def cell_get(context, cell_name):
    """Get a specific child Cell."""
    return IMPL.cell_get(context, cell_name)


def cell_get_all(context):
    """Get all child Cells."""
    return IMPL.cell_get_all(context)


####################


def instance_metadata_get(context, instance_uuid):
    """Get all metadata for an instance."""
    return IMPL.instance_metadata_get(context, instance_uuid)


def instance_metadata_delete(context, instance_uuid, key):
    """Delete the given metadata item."""
    IMPL.instance_metadata_delete(context, instance_uuid, key)


def instance_metadata_update(context, instance_uuid, metadata, delete):
    """Update metadata if it exists, otherwise create it."""
    return IMPL.instance_metadata_update(context, instance_uuid,
                                         metadata, delete)


####################


def instance_system_metadata_get(context, instance_uuid):
    """Get all system metadata for an instance."""
    return IMPL.instance_system_metadata_get(context, instance_uuid)


def instance_system_metadata_update(context, instance_uuid, metadata, delete):
    """Update metadata if it exists, otherwise create it."""
    IMPL.instance_system_metadata_update(
        context, instance_uuid, metadata, delete)


####################


def agent_build_create(context, values):
    """Create a new agent build entry."""
    return IMPL.agent_build_create(context, values)


def agent_build_get_by_triple(context, hypervisor, os, architecture):
    """Get agent build by hypervisor/OS/architecture triple."""
    return IMPL.agent_build_get_by_triple(context, hypervisor, os,
                                          architecture)


def agent_build_get_all(context, hypervisor=None):
    """Get all agent builds."""
    return IMPL.agent_build_get_all(context, hypervisor)


def agent_build_destroy(context, agent_update_id):
    """Destroy agent build entry."""
    IMPL.agent_build_destroy(context, agent_update_id)


def agent_build_update(context, agent_build_id, values):
    """Update agent build entry."""
    IMPL.agent_build_update(context, agent_build_id, values)


####################
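

# NOTE(editor): illustrative sketch only. It demonstrates the 'delete'
# flag of instance_metadata_update() above; the metadata values below
# are hypothetical.
def _example_replace_instance_metadata(ctxt, instance_uuid):
    """Sketch: replace an instance's metadata wholesale.

    With delete=True, existing keys that are absent from the supplied
    dict are removed, so the instance ends up with exactly these items;
    with delete=False the dict is merged into the existing metadata.
    """
    return instance_metadata_update(ctxt, instance_uuid,
                                    {'owner_team': 'example'}, True)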
def bw_usage_get(context, uuid, start_period, mac, use_slave=False):
    """Return bw usage for instance and mac in a given audit period."""
    return IMPL.bw_usage_get(context, uuid, start_period, mac,
                             use_slave=use_slave)


def bw_usage_get_by_uuids(context, uuids, start_period):
    """Return bw usages for instance(s) in a given audit period."""
    return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)


def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
                    last_ctr_in, last_ctr_out, last_refreshed=None,
                    update_cells=True):
    """Update cached bandwidth usage for an instance's network based on mac
    address. Creates new record if needed.
    """
    rv = IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
                              bw_out, last_ctr_in, last_ctr_out,
                              last_refreshed=last_refreshed)
    if update_cells:
        try:
            cells_rpcapi.CellsAPI().bw_usage_update_at_top(context,
                    uuid, mac, start_period, bw_in, bw_out,
                    last_ctr_in, last_ctr_out, last_refreshed)
        except Exception:
            LOG.exception(_("Failed to notify cells of bw_usage update"))
    return rv


###################


def vol_get_usage_by_time(context, begin):
    """Return volumes usage that have been updated after a specified time."""
    return IMPL.vol_get_usage_by_time(context, begin)


def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
                     instance_id, project_id, user_id, availability_zone,
                     update_totals=False):
    """Update cached volume usage for a volume.

    Creates a new record if needed.
    """
    return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
                                 wr_bytes, instance_id, project_id, user_id,
                                 availability_zone,
                                 update_totals=update_totals)


###################


def s3_image_get(context, image_id):
    """Find local s3 image represented by the provided id."""
    return IMPL.s3_image_get(context, image_id)


def s3_image_get_by_uuid(context, image_uuid):
    """Find local s3 image represented by the provided uuid."""
    return IMPL.s3_image_get_by_uuid(context, image_uuid)


def s3_image_create(context, image_uuid):
    """Create local s3 image represented by provided uuid."""
    return IMPL.s3_image_create(context, image_uuid)


####################


def aggregate_create(context, values, metadata=None):
    """Create a new aggregate with metadata."""
    return IMPL.aggregate_create(context, values, metadata)


def aggregate_get(context, aggregate_id):
    """Get a specific aggregate by id."""
    return IMPL.aggregate_get(context, aggregate_id)


def aggregate_get_by_host(context, host, key=None):
    """Get a list of aggregates that host belongs to."""
    return IMPL.aggregate_get_by_host(context, host, key)


def aggregate_metadata_get_by_host(context, host, key=None):
    """Get metadata for all aggregates that host belongs to.

    Returns a dictionary where each value is a set; this covers the case
    where two aggregates have different values for the same key. The
    optional key argument filters the result to that single metadata key.
    """
    return IMPL.aggregate_metadata_get_by_host(context, host, key)


def aggregate_metadata_get_by_metadata_key(context, aggregate_id, key):
    """Get metadata for an aggregate by metadata key."""
    return IMPL.aggregate_metadata_get_by_metadata_key(context, aggregate_id,
                                                       key)


def aggregate_host_get_by_metadata_key(context, key):
    """Get hosts with a specific metadata key for all aggregates.

    Returns a dictionary where each key is a hostname and each value is
    a set of the key values, e.g. {machine: set([az1, az2])}.
    """
    return IMPL.aggregate_host_get_by_metadata_key(context, key)


def aggregate_update(context, aggregate_id, values):
    """Update the attributes of an aggregate.

    If values contains a metadata key, it updates the aggregate
    metadata too.
""" return IMPL.aggregate_update(context, aggregate_id, values) def aggregate_delete(context, aggregate_id): """Delete an aggregate.""" return IMPL.aggregate_delete(context, aggregate_id) def aggregate_get_all(context): """Get all aggregates.""" return IMPL.aggregate_get_all(context) def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): """Add/update metadata. If set_delete=True, it adds only.""" IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete) def aggregate_metadata_get(context, aggregate_id): """Get metadata for the specified aggregate.""" return IMPL.aggregate_metadata_get(context, aggregate_id) def aggregate_metadata_delete(context, aggregate_id, key): """Delete the given metadata key.""" IMPL.aggregate_metadata_delete(context, aggregate_id, key) def aggregate_host_add(context, aggregate_id, host): """Add host to the aggregate.""" IMPL.aggregate_host_add(context, aggregate_id, host) def aggregate_host_get_all(context, aggregate_id): """Get hosts for the specified aggregate.""" return IMPL.aggregate_host_get_all(context, aggregate_id) def aggregate_host_delete(context, aggregate_id, host): """Delete the given host from the aggregate.""" IMPL.aggregate_host_delete(context, aggregate_id, host) #################### def instance_fault_create(context, values, update_cells=True): """Create a new Instance Fault.""" rv = IMPL.instance_fault_create(context, values) if update_cells: try: cells_rpcapi.CellsAPI().instance_fault_create_at_top(context, rv) except Exception: LOG.exception(_("Failed to notify cells of instance fault")) return rv def instance_fault_get_by_instance_uuids(context, instance_uuids): """Get all instance faults for the provided instance_uuids.""" return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids) #################### def action_start(context, values): """Start an action for an instance.""" return IMPL.action_start(context, values) def action_finish(context, values): """Finish an action for an instance.""" return IMPL.action_finish(context, values) def actions_get(context, uuid): """Get all instance actions for the provided instance.""" return IMPL.actions_get(context, uuid) def action_get_by_request_id(context, uuid, request_id): """Get the action by request_id and given instance.""" return IMPL.action_get_by_request_id(context, uuid, request_id) def action_event_start(context, values): """Start an event on an instance action.""" return IMPL.action_event_start(context, values) def action_event_finish(context, values): """Finish an event on an instance action.""" return IMPL.action_event_finish(context, values) def action_events_get(context, action_id): """Get the events by action id.""" return IMPL.action_events_get(context, action_id) def action_event_get_by_id(context, action_id, event_id): return IMPL.action_event_get_by_id(context, action_id, event_id) #################### def get_ec2_instance_id_by_uuid(context, instance_id): """Get ec2 id through uuid from instance_id_mappings table.""" return IMPL.get_ec2_instance_id_by_uuid(context, instance_id) def get_instance_uuid_by_ec2_id(context, ec2_id): """Get uuid through ec2 id from instance_id_mappings table.""" return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id) def ec2_instance_create(context, instance_uuid, id=None): """Create the ec2 id to instance uuid mapping on demand.""" return IMPL.ec2_instance_create(context, instance_uuid, id) #################### def task_log_end_task(context, task_name, period_beginning, period_ending, host, errors, 
message=None): """Mark a task as complete for a given host/time period.""" return IMPL.task_log_end_task(context, task_name, period_beginning, period_ending, host, errors, message) def task_log_begin_task(context, task_name, period_beginning, period_ending, host, task_items=None, message=None): """Mark a task as started for a given host/time period.""" return IMPL.task_log_begin_task(context, task_name, period_beginning, period_ending, host, task_items, message) def task_log_get_all(context, task_name, period_beginning, period_ending, host=None, state=None): return IMPL.task_log_get_all(context, task_name, period_beginning, period_ending, host, state) def task_log_get(context, task_name, period_beginning, period_ending, host, state=None): return IMPL.task_log_get(context, task_name, period_beginning, period_ending, host, state) #################### def archive_deleted_rows(context, max_rows=None): """Move up to max_rows rows from production tables to corresponding shadow tables. :returns: number of rows archived. """ return IMPL.archive_deleted_rows(context, max_rows=max_rows) def archive_deleted_rows_for_table(context, tablename, max_rows=None): """Move up to max_rows rows from tablename to corresponding shadow table. :returns: number of rows archived. """ return IMPL.archive_deleted_rows_for_table(context, tablename, max_rows=max_rows) nova-2014.1.5/nova/db/migration.py0000664000567000056700000000244412540642543017753 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Database setup and migration commands.""" from nova import utils IMPL = utils.LazyPluggable('backend', config_group='database', sqlalchemy='nova.db.sqlalchemy.migration') def db_sync(version=None): """Migrate the database to `version` or the most recent version.""" return IMPL.db_sync(version=version) def db_version(): """Display the current database version.""" return IMPL.db_version() def db_initial_version(): """The starting version for the database.""" return IMPL.db_initial_version() nova-2014.1.5/nova/context.py0000664000567000056700000001763412540642543017070 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""RequestContext: context for requests that persist through all of nova.""" import copy import uuid import six from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import local from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import policy LOG = logging.getLogger(__name__) def generate_request_id(): return 'req-' + str(uuid.uuid4()) class RequestContext(object): """Security context and request information. Represents the user taking a given action within the system. """ def __init__(self, user_id, project_id, is_admin=None, read_deleted="no", roles=None, remote_address=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, quota_class=None, user_name=None, project_name=None, service_catalog=None, instance_lock_checked=False, **kwargs): """:param read_deleted: 'no' indicates deleted records are hidden, 'yes' indicates deleted records are visible, 'only' indicates that *only* deleted records are visible. :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. :param kwargs: Extra arguments that might be present, but we ignore because they possibly came in from older rpc messages. """ if kwargs: LOG.warn(_('Arguments dropped when creating context: %s') % str(kwargs)) self.user_id = user_id self.project_id = project_id self.roles = roles or [] self.read_deleted = read_deleted self.remote_address = remote_address if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, six.string_types): timestamp = timeutils.parse_strtime(timestamp) self.timestamp = timestamp if not request_id: request_id = generate_request_id() self.request_id = request_id self.auth_token = auth_token if service_catalog: # Only include required parts of service_catalog self.service_catalog = [s for s in service_catalog if s.get('type') in ('volume',)] else: # if list is empty or none self.service_catalog = [] self.instance_lock_checked = instance_lock_checked # NOTE(markmc): this attribute is currently only used by the # rs_limits turnstile pre-processor. 
        # See https://lists.launchpad.net/openstack/msg12200.html
        self.quota_class = quota_class
        self.user_name = user_name
        self.project_name = project_name
        self.is_admin = is_admin
        if self.is_admin is None:
            self.is_admin = policy.check_is_admin(self)
        if overwrite or not hasattr(local.store, 'context'):
            self.update_store()

    def _get_read_deleted(self):
        return self._read_deleted

    def _set_read_deleted(self, read_deleted):
        if read_deleted not in ('no', 'yes', 'only'):
            raise ValueError(_("read_deleted can only be one of 'no', "
                               "'yes' or 'only', not %r") % read_deleted)
        self._read_deleted = read_deleted

    def _del_read_deleted(self):
        del self._read_deleted

    read_deleted = property(_get_read_deleted, _set_read_deleted,
                            _del_read_deleted)

    def update_store(self):
        local.store.context = self

    def to_dict(self):
        return {'user_id': self.user_id,
                'project_id': self.project_id,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'remote_address': self.remote_address,
                'timestamp': timeutils.strtime(self.timestamp),
                'request_id': self.request_id,
                'auth_token': self.auth_token,
                'quota_class': self.quota_class,
                'user_name': self.user_name,
                'service_catalog': self.service_catalog,
                'project_name': self.project_name,
                'instance_lock_checked': self.instance_lock_checked,
                'tenant': self.tenant,
                'user': self.user}

    @classmethod
    def from_dict(cls, values):
        values.pop('user', None)
        values.pop('tenant', None)
        return cls(**values)

    def elevated(self, read_deleted=None, overwrite=False):
        """Return a version of this context with admin flag set."""
        context = copy.copy(self)
        context.is_admin = True

        if 'admin' not in context.roles:
            context.roles.append('admin')

        if read_deleted is not None:
            context.read_deleted = read_deleted

        return context

    # NOTE(sirp): the openstack/common version of RequestContext uses
    # tenant/user whereas the Nova version uses project_id/user_id. We need
    # this shim in order to use context-aware code from openstack/common, like
    # logging, until we make the switch to using openstack/common's version of
    # RequestContext.
    @property
    def tenant(self):
        return self.project_id

    @property
    def user(self):
        return self.user_id


def get_admin_context(read_deleted="no"):
    return RequestContext(user_id=None,
                          project_id=None,
                          is_admin=True,
                          read_deleted=read_deleted,
                          overwrite=False)


def is_user_context(context):
    """Indicates if the request context is a normal user."""
    if not context:
        return False
    if context.is_admin:
        return False
    if not context.user_id or not context.project_id:
        return False
    return True


def require_admin_context(ctxt):
    """Raise exception.AdminRequired() if context is not an admin context."""
    if not ctxt.is_admin:
        raise exception.AdminRequired()


def require_context(ctxt):
    """Raise exception.NotAuthorized() if context is not a user or an
    admin context.
    """
    if not ctxt.is_admin and not is_user_context(ctxt):
        raise exception.NotAuthorized()
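

# NOTE(editor): illustrative sketch only -- a quick tour of the helpers
# above. The user and project ids are hypothetical, and constructing a
# RequestContext consults nova.policy for the admin check.
def _example_context_round_trip():
    """Sketch: serialize a context across RPC and elevate a copy."""
    ctxt = RequestContext('fake-user', 'fake-project', roles=['member'])
    # to_dict()/from_dict() is how a context typically crosses RPC.
    restored = RequestContext.from_dict(ctxt.to_dict())
    # elevated() returns a copy with is_admin set and the 'admin' role
    # appended; the original context is left unchanged.
    admin_ctxt = restored.elevated()
    require_admin_context(admin_ctxt)
    return admin_ctxt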
""" if not ctxt.is_admin and not is_user_context(ctxt): raise exception.NotAuthorized() def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: raise exception.NotAuthorized() nova-2014.1.5/nova/virt/0000775000567000056700000000000012540643452016003 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/imagecache.py0000664000567000056700000001203312540642544020423 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova.compute import task_states from nova.compute import vm_states imagecache_opts = [ cfg.IntOpt('image_cache_manager_interval', default=2400, help='Number of seconds to wait between runs of the image ' 'cache manager'), cfg.StrOpt('image_cache_subdirectory_name', default='_base', help='Where cached images are stored under $instances_path. ' 'This is NOT the full path - just a folder name. ' 'For per-compute-host cached images, set to _base_$my_ip', deprecated_name='base_dir_name'), cfg.BoolOpt('remove_unused_base_images', default=True, help='Should unused base images be removed?', deprecated_group='libvirt'), cfg.IntOpt('remove_unused_original_minimum_age_seconds', default=(24 * 3600), help='Unused unresized base images younger than this will not ' 'be removed', deprecated_group='libvirt'), ] CONF = cfg.CONF CONF.register_opts(imagecache_opts) CONF.import_opt('host', 'nova.netconf') class ImageCacheManager(object): """Base class for the image cache manager. This class will provide a generic interface to the image cache manager. """ def __init__(self): self.remove_unused_base_images = CONF.remove_unused_base_images self.resize_states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH] def _get_base(self): """Returns the base directory of the cached images.""" raise NotImplementedError() def _list_running_instances(self, context, all_instances): """List running instances (on all compute nodes). 
        This method returns a dictionary with the following keys:
            - used_images
            - image_popularity
            - instance_names
        """
        used_images = {}
        image_popularity = {}
        instance_names = set()

        for instance in all_instances:
            # NOTE(mikal): "instance name" here means "the name of a directory
            # which might contain an instance" and therefore needs to include
            # historical permutations as well as the current one.
            instance_names.add(instance['name'])
            instance_names.add(instance['uuid'])
            if (instance['task_state'] in self.resize_states or
                    instance['vm_state'] == vm_states.RESIZED):
                instance_names.add(instance['name'] + '_resize')
                instance_names.add(instance['uuid'] + '_resize')

            for image_key in ['image_ref', 'kernel_id', 'ramdisk_id']:
                try:
                    image_ref_str = str(instance[image_key])
                except KeyError:
                    continue
                local, remote, insts = used_images.get(image_ref_str,
                                                       (0, 0, []))
                if instance['host'] == CONF.host:
                    local += 1
                else:
                    remote += 1
                insts.append(instance['name'])
                used_images[image_ref_str] = (local, remote, insts)

                image_popularity.setdefault(image_ref_str, 0)
                image_popularity[image_ref_str] += 1

        return {'used_images': used_images,
                'image_popularity': image_popularity,
                'instance_names': instance_names}

    def _list_base_images(self, base_dir):
        """Return a list of the images present in _base.

        This method returns a dictionary with the following keys:
            - unexplained_images
            - originals
        """
        return {'unexplained_images': [],
                'originals': []}

    def _age_and_verify_cached_images(self, context, all_instances,
                                      base_dir):
        """Ages and verifies cached images."""
        raise NotImplementedError()

    def update(self, context, all_instances):
        """The cache manager entry point.

        This will update the cache according to the defined cache
        management scheme. The information populated in the cached stats
        will be used for the cache management.
        """
        raise NotImplementedError()
nova-2014.1.5/nova/virt/vmwareapi/0000775000567000056700000000000012540643452017776 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/vmwareapi/read_write_util.py0000664000567000056700000001323712540642544023541 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Classes to handle image files.

Collection of classes to handle image upload/download to/from Image
service (like Glance image storage and retrieval service) from/to
ESX/ESXi server.
"""

import httplib
import urllib
import urllib2

import six.moves.urllib.parse as urlparse

from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import utils

LOG = logging.getLogger(__name__)

USER_AGENT = "OpenStack-ESX-Adapter"

READ_CHUNKSIZE = 65536


class GlanceFileRead(object):
    """Glance file read handler class."""

    def __init__(self, glance_read_iter):
        self.glance_read_iter = glance_read_iter
        self.iter = self.get_next()

    def read(self, chunk_size):
        """Read an item from the queue.

        The chunk size is ignored because the Client ImageBodyIterator
        uses its own CHUNKSIZE.
""" try: return self.iter.next() except StopIteration: return "" def get_next(self): """Get the next item from the image iterator.""" for data in self.glance_read_iter: yield data def close(self): """A dummy close just to maintain consistency.""" pass class VMwareHTTPFile(object): """Base class for HTTP file.""" def __init__(self, file_handle): self.eof = False self.file_handle = file_handle def set_eof(self, eof): """Set the end of file marker.""" self.eof = eof def get_eof(self): """Check if the end of file has been reached.""" return self.eof def close(self): """Close the file handle.""" try: self.file_handle.close() except Exception as exc: LOG.exception(exc) def _build_vim_cookie_headers(self, vim_cookies): """Build ESX host session cookie headers.""" cookie_header = "" for vim_cookie in vim_cookies: cookie_header = vim_cookie.name + "=" + vim_cookie.value break return cookie_header def write(self, data): """Write data to the file.""" raise NotImplementedError() def read(self, chunk_size): """Read a chunk of data.""" raise NotImplementedError() def get_size(self): """Get size of the file to be read.""" raise NotImplementedError() class VMwareHTTPWriteFile(VMwareHTTPFile): """VMware file write handler class.""" def __init__(self, host, data_center_name, datastore_name, cookies, file_path, file_size, scheme="https"): if utils.is_valid_ipv6(host): base_url = "%s://[%s]/folder/%s" % (scheme, host, file_path) else: base_url = "%s://%s/folder/%s" % (scheme, host, file_path) param_list = {"dcPath": data_center_name, "dsName": datastore_name} base_url = base_url + "?" + urllib.urlencode(param_list) _urlparse = urlparse.urlparse(base_url) scheme, netloc, path, params, query, fragment = _urlparse if scheme == "http": conn = httplib.HTTPConnection(netloc) elif scheme == "https": conn = httplib.HTTPSConnection(netloc) conn.putrequest("PUT", path + "?" + query) conn.putheader("User-Agent", USER_AGENT) conn.putheader("Content-Length", file_size) conn.putheader("Cookie", self._build_vim_cookie_headers(cookies)) conn.endheaders() self.conn = conn VMwareHTTPFile.__init__(self, conn) def write(self, data): """Write to the file.""" self.file_handle.send(data) def close(self): """Get the response and close the connection.""" try: self.conn.getresponse() except Exception as excep: LOG.debug(_("Exception during HTTP connection close in " "VMwareHTTPWrite. Exception is %s") % excep) super(VMwareHTTPWriteFile, self).close() class VMwareHTTPReadFile(VMwareHTTPFile): """VMware file read handler class.""" def __init__(self, host, data_center_name, datastore_name, cookies, file_path, scheme="https"): base_url = "%s://%s/folder/%s" % (scheme, host, urllib.pathname2url(file_path)) param_list = {"dcPath": data_center_name, "dsName": datastore_name} base_url = base_url + "?" + urllib.urlencode(param_list) headers = {'User-Agent': USER_AGENT, 'Cookie': self._build_vim_cookie_headers(cookies)} request = urllib2.Request(base_url, None, headers) conn = urllib2.urlopen(request) VMwareHTTPFile.__init__(self, conn) def read(self, chunk_size): """Read a chunk of data.""" # We are ignoring the chunk size passed for we want the pipe to hold # data items of the chunk-size that Glance Client uses for read # while writing. 
        return self.file_handle.read(READ_CHUNKSIZE)

    def get_size(self):
        """Get size of the file to be read."""
        return self.file_handle.headers.get("Content-Length", -1)
nova-2014.1.5/nova/virt/vmwareapi/network_util.py0000664000567000056700000001634312540642544023106 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Utility functions for ESX Networking.
"""

from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util

LOG = logging.getLogger(__name__)


def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
    """Gets reference to the network whose name is passed as the
    argument.
    """
    host = vm_util.get_host_ref(session, cluster)
    if cluster is not None:
        vm_networks_ret = session._call_method(vim_util,
                                               "get_dynamic_property",
                                               cluster,
                                               "ClusterComputeResource",
                                               "network")
    else:
        vm_networks_ret = session._call_method(vim_util,
                                               "get_dynamic_property", host,
                                               "HostSystem", "network")

    # Meaning there are no networks on the host. suds responds with a ""
    # in the parent property field rather than a [] in the
    # ManagedObjectReference property field of the parent.
    if not vm_networks_ret:
        LOG.debug(_("No networks configured on host!"))
        return
    vm_networks = vm_networks_ret.ManagedObjectReference
    network_obj = {}
    LOG.debug(_("Configured networks: %s"), vm_networks)
    for network in vm_networks:
        # Get network properties.
        if network._type == 'DistributedVirtualPortgroup':
            props = session._call_method(vim_util,
                                         "get_dynamic_property", network,
                                         "DistributedVirtualPortgroup",
                                         "config")
            # NOTE(asomya): This only works on ESXi if the port binding is
            # set to ephemeral
            if props.name == network_name:
                network_obj['type'] = 'DistributedVirtualPortgroup'
                network_obj['dvpg'] = props.key
                dvs_props = session._call_method(
                    vim_util,
                    "get_dynamic_property",
                    props.distributedVirtualSwitch,
                    "VmwareDistributedVirtualSwitch",
                    "uuid")
                network_obj['dvsw'] = dvs_props
        else:
            props = session._call_method(vim_util,
                                         "get_dynamic_property", network,
                                         "Network", "summary.name")
            if props == network_name:
                network_obj['type'] = 'Network'
                network_obj['name'] = network_name
    if network_obj:
        return network_obj
    LOG.debug(_("Network %s not found on host!"), network_name)
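

# NOTE(editor): illustrative sketch only. 'session' is assumed to be an
# initialized vmwareapi session object and the network name below is
# hypothetical.
def _example_lookup_network(session, cluster=None):
    """Sketch: look up a network and branch on the discovered type."""
    network_obj = get_network_with_the_name(session, "VM Network", cluster)
    if not network_obj:
        return None
    if network_obj['type'] == 'DistributedVirtualPortgroup':
        # 'dvpg' is the portgroup key and 'dvsw' is the uuid of its
        # distributed virtual switch.
        return network_obj['dvpg'], network_obj['dvsw']
    return network_obj['name']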
def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
    """Gets the vswitch associated with the physical network adapter
    with the name supplied.
    """
    # Get the list of vSwitches on the Host System.
    host_mor = vm_util.get_host_ref(session, cluster)
    vswitches_ret = session._call_method(vim_util,
                                         "get_dynamic_property", host_mor,
                                         "HostSystem",
                                         "config.network.vswitch")
    # Meaning there are no vSwitches on the host. Shouldn't be the case,
    # but check defensively anyway.
    if not vswitches_ret:
        return
    vswitches = vswitches_ret.HostVirtualSwitch
    # Get the vSwitch associated with the network adapter.
    for elem in vswitches:
        try:
            for nic_elem in elem.pnic:
                if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
                    return elem.name
        # Catching AttributeError as a vSwitch may not be associated with a
        # physical NIC.
        except AttributeError:
            pass


def check_if_vlan_interface_exists(session, vlan_interface, cluster=None):
    """Checks if the vlan_interface exists on the esx host."""
    host_mor = vm_util.get_host_ref(session, cluster)
    physical_nics_ret = session._call_method(vim_util,
                                             "get_dynamic_property",
                                             host_mor, "HostSystem",
                                             "config.network.pnic")
    # Meaning there are no physical nics on the host.
    if not physical_nics_ret:
        return False
    physical_nics = physical_nics_ret.PhysicalNic
    for pnic in physical_nics:
        if vlan_interface == pnic.device:
            return True
    return False


def get_vlanid_and_vswitch_for_portgroup(session, pg_name, cluster=None):
    """Get the vlan id and vswitch associated with the port group."""
    host_mor = vm_util.get_host_ref(session, cluster)
    port_grps_on_host_ret = session._call_method(vim_util,
                                                 "get_dynamic_property",
                                                 host_mor, "HostSystem",
                                                 "config.network.portgroup")
    if not port_grps_on_host_ret:
        msg = _("ESX SOAP server returned an empty port group "
                "for the host system in its response")
        LOG.error(msg)
        raise exception.NovaException(msg)
    port_grps_on_host = port_grps_on_host_ret.HostPortGroup
    for p_gp in port_grps_on_host:
        if p_gp.spec.name == pg_name:
            p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
            return p_gp.spec.vlanId, p_grp_vswitch_name


def create_port_group(session, pg_name, vswitch_name, vlan_id=0,
                      cluster=None):
    """Creates a port group on the host system with the vlan tags
    supplied. VLAN id 0 means no vlan id association.
    """
    client_factory = session._get_vim().client.factory
    add_prt_grp_spec = vm_util.get_add_vswitch_port_group_spec(
        client_factory,
        vswitch_name,
        pg_name,
        vlan_id)
    host_mor = vm_util.get_host_ref(session, cluster)
    network_system_mor = session._call_method(vim_util,
                                              "get_dynamic_property",
                                              host_mor, "HostSystem",
                                              "configManager.networkSystem")
    LOG.debug(_("Creating Port Group with name %s on "
                "the ESX host") % pg_name)
    try:
        session._call_method(session._get_vim(),
                             "AddPortGroup", network_system_mor,
                             portgrp=add_prt_grp_spec)
    except error_util.AlreadyExistsException:
        # There can be a race condition when two instances try
        # adding port groups at the same time. One succeeds, then
        # the other one will get an exception. Since we are
        # concerned with the port group being created, which is done
        # by the other call, we can ignore the exception.
        LOG.debug(_("Port Group %s already exists."), pg_name)
    LOG.debug(_("Created Port Group with name %s on "
                "the ESX host") % pg_name)
nova-2014.1.5/nova/virt/vmwareapi/vim.py0000664000567000056700000002331312540642544021146 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the # License for the specific language governing permissions and limitations # under the License. """ Classes for making VMware VI SOAP calls. """ import httplib import urllib2 from oslo.config import cfg import suds from nova.openstack.common.gettextutils import _ from nova import utils from nova.virt.vmwareapi import error_util RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"' CONN_ABORT_ERROR = 'Software caused connection abort' ADDRESS_IN_USE_ERROR = 'Address already in use' vmwareapi_wsdl_loc_opt = cfg.StrOpt('wsdl_location', help='Optional VIM Service WSDL Location ' 'e.g http:///vimService.wsdl. ' 'Optional over-ride to default location for bug work-arounds') CONF = cfg.CONF CONF.register_opt(vmwareapi_wsdl_loc_opt, 'vmware') def get_moref(value, type): """Get managed object reference.""" moref = suds.sudsobject.Property(value) moref._type = type return moref def object_to_dict(obj, list_depth=1): """Convert Suds object into serializable format. The calling function can limit the amount of list entries that are converted. """ d = {} for k, v in suds.sudsobject.asdict(obj).iteritems(): if hasattr(v, '__keylist__'): d[k] = object_to_dict(v, list_depth=list_depth) elif isinstance(v, list): d[k] = [] used = 0 for item in v: used = used + 1 if used > list_depth: break if hasattr(item, '__keylist__'): d[k].append(object_to_dict(item, list_depth=list_depth)) else: d[k].append(item) else: d[k] = v return d class VIMMessagePlugin(suds.plugin.MessagePlugin): def addAttributeForValue(self, node): # suds does not handle AnyType properly. # VI SDK requires type attribute to be set when AnyType is used if node.name == 'value': node.set('xsi:type', 'xsd:string') def marshalled(self, context): """suds will send the specified soap envelope. Provides the plugin with the opportunity to prune empty nodes and fixup nodes before sending it to the server. """ # suds builds the entire request object based on the wsdl schema. # VI SDK throws server errors if optional SOAP nodes are sent # without values, e.g. as opposed to test context.envelope.prune() context.envelope.walk(self.addAttributeForValue) class Vim: """The VIM Object.""" def __init__(self, protocol="https", host="localhost"): """Creates the necessary Communication interfaces and gets the ServiceContent for initiating SOAP transactions. protocol: http or https host : ESX IPAddress[:port] or ESX Hostname[:port] """ if not suds: raise Exception(_("Unable to import suds.")) self._protocol = protocol self._host_name = host self.wsdl_url = Vim.get_wsdl_url(protocol, host) self.url = Vim.get_soap_url(protocol, host) self.client = suds.client.Client(self.wsdl_url, location=self.url, plugins=[VIMMessagePlugin()]) self._service_content = self.retrieve_service_content() def retrieve_service_content(self): return self.RetrieveServiceContent("ServiceInstance") @staticmethod def get_wsdl_url(protocol, host_name): """Allows override of the wsdl location, making this static means we can test the logic outside of the constructor without forcing the test environment to have multiple valid wsdl locations to test against. 
        :param protocol: https or http
        :param host_name: localhost or other server name
        :return: string to WSDL location for vSphere WS Management API
        """
        # optional WSDL location over-ride for work-arounds
        if CONF.vmware.wsdl_location:
            return CONF.vmware.wsdl_location
        # calculate default WSDL location if no override supplied
        return Vim.get_soap_url(protocol, host_name) + "/vimService.wsdl"

    @staticmethod
    def get_soap_url(protocol, host_name):
        """Calculates the location of the SOAP services
        for a particular server. Created as a static
        method for testing.

        :param protocol: https or http
        :param host_name: localhost or other vSphere server name
        :return: the url to the active vSphere WS Management API
        """
        if utils.is_valid_ipv6(host_name):
            return '%s://[%s]/sdk' % (protocol, host_name)
        return '%s://%s/sdk' % (protocol, host_name)

    def get_service_content(self):
        """Gets the service content object."""
        return self._service_content

    def __getattr__(self, attr_name):
        """Makes the API calls and gets the result."""
        def vim_request_handler(managed_object, **kwargs):
            """Builds the SOAP message and parses the response for fault
            checking and other errors.

            managed_object    : Managed Object Reference or Managed
                                Object Name
            **kwargs          : Keyword arguments of the call
            """
            # Dynamic handler for VI SDK Calls
            try:
                request_mo = self._request_managed_object_builder(
                    managed_object)
                request = getattr(self.client.service, attr_name)
                response = request(request_mo, **kwargs)
                # To check for the faults that are part of the message body
                # and not returned as Fault object response from the ESX
                # SOAP server
                if hasattr(error_util.FaultCheckers,
                           attr_name.lower() + "_fault_checker"):
                    fault_checker = getattr(
                        error_util.FaultCheckers,
                        attr_name.lower() + "_fault_checker")
                    fault_checker(response)
                return response
            # Catch the VimFaultException that is raised by the fault
            # check of the SOAP response
            except error_util.VimFaultException:
                raise
            except suds.MethodNotFound:
                raise
            except suds.WebFault as excep:
                doc = excep.document
                fault_string = doc.childAtPath("/Envelope/Body/Fault/"
                                               "faultstring").getText()
                detail = doc.childAtPath("/Envelope/Body/Fault/detail")
                fault_list = []
                details = {}
                if detail:
                    for fault in detail.getChildren():
                        fault_list.append(fault.get("type"))
                        for child in fault.getChildren():
                            details[child.name] = child.getText()
                raise error_util.VimFaultException(fault_list, fault_string,
                                                   details)
            except AttributeError as excep:
                raise error_util.VimAttributeError(
                    _("No such SOAP method '%s' provided by VI SDK") %
                    (attr_name), excep)
            except (httplib.CannotSendRequest,
                    httplib.ResponseNotReady,
                    httplib.CannotSendHeader) as excep:
                raise error_util.SessionOverLoadException(
                    _("httplib error in %s: ") % (attr_name), excep)
            except (urllib2.URLError,
                    urllib2.HTTPError) as excep:
                raise error_util.SessionConnectionException(
                    _("urllib2 error in %s: ") % (attr_name), excep)
            except Exception as excep:
                # Socket errors which need special handling for they
                # might be caused by ESX API call overload
                if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
                        str(excep).find(CONN_ABORT_ERROR) != -1):
                    raise error_util.SessionOverLoadException(
                        _("Socket error in %s: ") % (attr_name), excep)
                # Type error that needs special handling for it might be
                # caused by ESX host API call overload
                elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
                    raise error_util.SessionOverLoadException(
                        _("Type error in %s: ") % (attr_name), excep)
                else:
                    raise error_util.VimException(
                        _("Exception in %s ") % (attr_name), excep)
        return vim_request_handler
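
    # NOTE(editor): illustrative usage sketch. Thanks to __getattr__ above,
    # any VI SDK method name can be invoked directly on a Vim instance;
    # the host and credentials below are hypothetical:
    #
    #     vim = Vim(protocol="https", host="esx-host.example.com")
    #     content = vim.get_service_content()
    #     # Dynamic dispatch builds the SOAP request and raises
    #     # error_util.VimFaultException on SOAP faults.
    #     session = vim.Login(content.sessionManager,
    #                         userName="root", password="secret")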
    def _request_managed_object_builder(self, managed_object):
        """Builds the request managed object."""
        # Request Managed Object Builder
        if isinstance(managed_object, str):
            mo = suds.sudsobject.Property(managed_object)
            mo._type = managed_object
        else:
            mo = managed_object
        return mo

    def __repr__(self):
        return "VIM Object"

    def __str__(self):
        return "VIM Object"
nova-2014.1.5/nova/virt/vmwareapi/imagecache.py0000664000567000056700000002023212540642544022416 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Image cache class

Images that are stored in the cache folder will be stored in a folder whose
name is the image ID. In the event that an image is discovered to be no
longer used then a timestamp will be added to the image folder. The
timestamp will be a folder - this is because we can use the VMware APIs for
creating and deleting folders (it really simplifies things). The timestamp
will contain the time, on the compute node, when the image was first seen
to be unused.

At each aging iteration we check if the image can be aged. This is done by
comparing the current nova compute time to the time embedded in the
timestamp. If the time exceeds the configured aging time then the parent
folder, that is the image ID folder, will be deleted. That effectively ages
the cached image.

If an image is used then the timestamps will be deleted.

When accessing a timestamp we make use of locking. This ensures that aging
will not delete an image during the spawn operation. When spawning, the
timestamp folder will be locked and the timestamps will be purged. This
will ensure that an image is not deleted during the spawn.
"""

from oslo.config import cfg

from nova.openstack.common.gettextutils import _
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.virt import imagecache
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_opt('remove_unused_original_minimum_age_seconds',
                'nova.virt.imagecache')

TIMESTAMP_PREFIX = 'ts-'
TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M-%S'


class ImageCacheManager(imagecache.ImageCacheManager):
    def __init__(self, session, base_folder):
        super(ImageCacheManager, self).__init__()
        self._session = session
        self._base_folder = base_folder
        self._ds_browser = {}

    def _folder_delete(self, path, dc_ref):
        try:
            ds_util.file_delete(self._session, path, dc_ref)
        except (error_util.CannotDeleteFileException,
                error_util.FileFaultException,
                error_util.FileLockedException) as e:
            # There may be more than one process or thread that tries
            # to delete the file.
            LOG.warning(_("Unable to delete %(file)s.
Exception: %(ex)s"), {'file': path, 'ex': e}) except error_util.FileNotFoundException: LOG.debug(_("File not found: %s"), path) def timestamp_folder_get(self, ds_path, image_id): """Returns the timestamp folder.""" return '%s/%s' % (ds_path, image_id) def timestamp_cleanup(self, dc_ref, ds_browser, ds_ref, ds_name, ds_path): ts = self._get_timestamp(ds_browser, ds_path) if ts: ts_path = '%s/%s' % (ds_path, ts) LOG.debug(_("Timestamp path %s exists. Deleting!"), ts_path) # Image is used - no longer need timestamp folder self._folder_delete(ts_path, dc_ref) def _get_timestamp(self, ds_browser, ds_path): files = ds_util.get_sub_folders(self._session, ds_browser, ds_path) if files: for file in files: if file.startswith(TIMESTAMP_PREFIX): return file def _get_timestamp_filename(self): return '%s%s' % (TIMESTAMP_PREFIX, timeutils.strtime(fmt=TIMESTAMP_FORMAT)) def _get_datetime_from_filename(self, timestamp_filename): ts = timestamp_filename.lstrip(TIMESTAMP_PREFIX) return timeutils.parse_strtime(ts, fmt=TIMESTAMP_FORMAT) def _get_ds_browser(self, ds_ref): ds_browser = self._ds_browser.get(ds_ref.value) if not ds_browser: ds_browser = vim_util.get_dynamic_property( self._session._get_vim(), ds_ref, "Datastore", "browser") self._ds_browser[ds_ref.value] = ds_browser return ds_browser def _list_datastore_images(self, ds_path, datastore): """Return a list of the images present in _base. This method returns a dictionary with the following keys: - unexplained_images - originals """ ds_browser = self._get_ds_browser(datastore['ref']) originals = ds_util.get_sub_folders(self._session, ds_browser, ds_path) return {'unexplained_images': [], 'originals': originals} def _age_cached_images(self, context, datastore, dc_info, ds_path): """Ages cached images.""" age_seconds = CONF.remove_unused_original_minimum_age_seconds unused_images = self.originals - self.used_images ds_browser = self._get_ds_browser(datastore['ref']) for image in unused_images: path = self.timestamp_folder_get(ds_path, image) # Lock to ensure that the spawn will not try and access a image # that is currently being deleted on the datastore. with lockutils.lock(path, lock_file_prefix='nova-vmware-ts', external=True): ts = self._get_timestamp(ds_browser, path) if not ts: ts_path = '%s/%s' % (path, self._get_timestamp_filename()) try: ds_util.mkdir(self._session, ts_path, dc_info.ref) except error_util.FileAlreadyExistsException: LOG.debug(_("Timestamp already exists.")) LOG.info(_("Image %s is no longer used by this node. " "Pending deletion!"), image) else: dt = self._get_datetime_from_filename(ts) if timeutils.is_older_than(dt, age_seconds): LOG.info(_("Image %s is no longer used. " "Deleting!"), path) # Image has aged - delete the image ID folder self._folder_delete(path, dc_info.ref) # If the image is used and the timestamp file exists then we delete # the timestamp. for image in self.used_images: path = self.timestamp_folder_get(ds_path, image) with lockutils.lock(path, lock_file_prefix='nova-vmware-ts', external=True): self.timestamp_cleanup(dc_info.ref, ds_browser, datastore['ref'], datastore['name'], path) def update(self, context, instances, datastores_info): """The cache manager entry point. This will invoke the cache manager. This will update the cache according to the defined cache management scheme. The information populated in the cached stats will be used for the cache management. 
""" # read running instances data running = self._list_running_instances(context, instances) self.used_images = set(running['used_images'].keys()) # perform the aging and image verification per datastore for (datastore, dc_info) in datastores_info: ds_path = ds_util.build_datastore_path(datastore['name'], self._base_folder) images = self._list_datastore_images(ds_path, datastore) self.originals = images['originals'] self._age_cached_images(context, datastore, dc_info, ds_path) nova-2014.1.5/nova/virt/vmwareapi/vm_util.py0000664000567000056700000015623112540642544022040 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The VMware API VM utility module to build SOAP object specs. """ import collections import copy import functools from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import vim_util CONF = cfg.CONF LOG = logging.getLogger(__name__) DSRecord = collections.namedtuple( 'DSRecord', ['datastore', 'name', 'capacity', 'freespace']) # A cache for VM references. The key will be the VM name # and the value is the VM reference. The VM name is unique. This # is either the UUID of the instance or UUID-rescue in the case # that this is a rescue VM. This is in order to prevent # unnecessary communication with the backend. _VM_REFS_CACHE = {} def vm_refs_cache_reset(): global _VM_REFS_CACHE _VM_REFS_CACHE = {} def vm_ref_cache_delete(id): _VM_REFS_CACHE.pop(id, None) def vm_ref_cache_update(id, vm_ref): _VM_REFS_CACHE[id] = vm_ref def vm_ref_cache_get(id): return _VM_REFS_CACHE.get(id) def _vm_ref_cache(id, func, session, data): vm_ref = vm_ref_cache_get(id) if not vm_ref: vm_ref = func(session, data) vm_ref_cache_update(id, vm_ref) return vm_ref def vm_ref_cache_from_instance(func): @functools.wraps(func) def wrapper(session, instance): id = instance['uuid'] return _vm_ref_cache(id, func, session, instance) return wrapper def vm_ref_cache_from_name(func): @functools.wraps(func) def wrapper(session, name): id = name return _vm_ref_cache(id, func, session, name) return wrapper # the config key which stores the VNC port VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]' def get_vm_create_spec(client_factory, instance, name, data_store_name, vif_infos, os_type="otherGuest"): """Builds the VM Create spec.""" config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') config_spec.name = name config_spec.guestId = os_type # The name is the unique identifier for the VM. This will either be the # instance UUID or the instance UUID with suffix '-rescue' for VM's that # are in rescue mode config_spec.instanceUuid = name # Allow nested ESX instances to host 64 bit VMs. 
if os_type == "vmkernel5Guest": config_spec.nestedHVEnabled = "True" vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo') vm_file_info.vmPathName = "[" + data_store_name + "]" config_spec.files = vm_file_info tools_info = client_factory.create('ns0:ToolsConfigInfo') tools_info.afterPowerOn = True tools_info.afterResume = True tools_info.beforeGuestStandby = True tools_info.beforeGuestShutdown = True tools_info.beforeGuestReboot = True config_spec.tools = tools_info config_spec.numCPUs = int(instance['vcpus']) config_spec.memoryMB = int(instance['memory_mb']) vif_spec_list = [] for vif_info in vif_infos: vif_spec = create_network_spec(client_factory, vif_info) vif_spec_list.append(vif_spec) device_config_spec = vif_spec_list config_spec.deviceChange = device_config_spec # add vm-uuid and iface-id.x values for Neutron extra_config = [] opt = client_factory.create('ns0:OptionValue') opt.key = "nvp.vm-uuid" opt.value = instance['uuid'] extra_config.append(opt) i = 0 for vif_info in vif_infos: if vif_info['iface_id']: opt = client_factory.create('ns0:OptionValue') opt.key = "nvp.iface-id.%d" % i opt.value = vif_info['iface_id'] extra_config.append(opt) i += 1 config_spec.extraConfig = extra_config return config_spec def get_vm_resize_spec(client_factory, instance): """Provides updates for a VM spec.""" resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec') resize_spec.numCPUs = int(instance['vcpus']) resize_spec.memoryMB = int(instance['memory_mb']) return resize_spec def create_controller_spec(client_factory, key, adapter_type="lsiLogic"): """Builds a Config Spec for the LSI or Bus Logic Controller's addition which acts as the controller for the virtual hard disk to be attached to the VM. """ # Create a controller for the Virtual Hard Disk virtual_device_config = client_factory.create( 'ns0:VirtualDeviceConfigSpec') virtual_device_config.operation = "add" if adapter_type == "busLogic": virtual_controller = client_factory.create( 'ns0:VirtualBusLogicController') elif adapter_type == "lsiLogicsas": virtual_controller = client_factory.create( 'ns0:VirtualLsiLogicSASController') else: virtual_controller = client_factory.create( 'ns0:VirtualLsiLogicController') virtual_controller.key = key virtual_controller.busNumber = 0 virtual_controller.sharedBus = "noSharing" virtual_device_config.device = virtual_controller return virtual_device_config def create_network_spec(client_factory, vif_info): """Builds a config spec for the addition of a new network adapter to the VM. """ network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec') network_spec.operation = "add" # Keep compatible with other Hyper vif model parameter. if vif_info['vif_model'] == "e1000": vif_info['vif_model'] = "VirtualE1000" vif = 'ns0:' + vif_info['vif_model'] net_device = client_factory.create(vif) # NOTE(asomya): Only works on ESXi if the portgroup binding is set to # ephemeral. Invalid configuration if set to static and the NIC does # not come up on boot if set to dynamic. 
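    # The backing object built below depends on the network type: an opaque
    # network (e.g. NSX), a distributed virtual portgroup, or - the default
    # case - a standard portgroup referenced by its device name.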
network_ref = vif_info['network_ref'] network_name = vif_info['network_name'] mac_address = vif_info['mac_address'] backing = None if network_ref and network_ref['type'] == 'OpaqueNetwork': backing_name = ''.join(['ns0:VirtualEthernetCard', 'OpaqueNetworkBackingInfo']) backing = client_factory.create(backing_name) backing.opaqueNetworkId = network_ref['network-id'] backing.opaqueNetworkType = network_ref['network-type'] elif (network_ref and network_ref['type'] == "DistributedVirtualPortgroup"): backing_name = ''.join(['ns0:VirtualEthernetCardDistributed', 'VirtualPortBackingInfo']) backing = client_factory.create(backing_name) portgroup = client_factory.create( 'ns0:DistributedVirtualSwitchPortConnection') portgroup.switchUuid = network_ref['dvsw'] portgroup.portgroupKey = network_ref['dvpg'] backing.port = portgroup else: backing = client_factory.create( 'ns0:VirtualEthernetCardNetworkBackingInfo') backing.deviceName = network_name connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo') connectable_spec.startConnected = True connectable_spec.allowGuestControl = True connectable_spec.connected = True net_device.connectable = connectable_spec net_device.backing = backing # The Server assigns a Key to the device. Here we pass a -ve temporary key. # -ve because actual keys are +ve numbers and we don't # want a clash with the key that server might associate with the device net_device.key = -47 net_device.addressType = "manual" net_device.macAddress = mac_address net_device.wakeOnLanEnabled = True network_spec.device = net_device return network_spec def get_vmdk_attach_config_spec(client_factory, disk_type="preallocated", file_path=None, disk_size=None, linked_clone=False, controller_key=None, unit_number=None, device_name=None): """Builds the vmdk attach config spec.""" config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') device_config_spec = [] virtual_device_config_spec = create_virtual_disk_spec(client_factory, controller_key, disk_type, file_path, disk_size, linked_clone, unit_number, device_name) device_config_spec.append(virtual_device_config_spec) config_spec.deviceChange = device_config_spec return config_spec def get_cdrom_attach_config_spec(client_factory, datastore, file_path, controller_key, cdrom_unit_number): """Builds and returns the cdrom attach config spec.""" config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') device_config_spec = [] virtual_device_config_spec = create_virtual_cdrom_spec(client_factory, datastore, controller_key, file_path, cdrom_unit_number) device_config_spec.append(virtual_device_config_spec) config_spec.deviceChange = device_config_spec return config_spec def get_vmdk_detach_config_spec(client_factory, device, destroy_disk=False): """Builds the vmdk detach config spec.""" config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') device_config_spec = [] virtual_device_config_spec = detach_virtual_disk_spec(client_factory, device, destroy_disk) device_config_spec.append(virtual_device_config_spec) config_spec.deviceChange = device_config_spec return config_spec def get_vm_extra_config_spec(client_factory, extra_opts): """Builds extra spec fields from a dictionary.""" config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') # add the key value pairs extra_config = [] for key, value in extra_opts.iteritems(): opt = client_factory.create('ns0:OptionValue') opt.key = key opt.value = value extra_config.append(opt) config_spec.extraConfig = extra_config return config_spec def 
get_vmdk_path_and_adapter_type(hardware_devices, uuid=None): """Gets the vmdk file path and the storage adapter type.""" if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice vmdk_file_path = None vmdk_controller_key = None disk_type = None adapter_type_dict = {} for device in hardware_devices: if device.__class__.__name__ == "VirtualDisk": if device.backing.__class__.__name__ == \ "VirtualDiskFlatVer2BackingInfo": if uuid: if uuid in device.backing.fileName: vmdk_file_path = device.backing.fileName else: vmdk_file_path = device.backing.fileName vmdk_controller_key = device.controllerKey if getattr(device.backing, 'thinProvisioned', False): disk_type = "thin" else: if getattr(device.backing, 'eagerlyScrub', False): disk_type = "eagerZeroedThick" else: disk_type = "preallocated" elif device.__class__.__name__ == "VirtualLsiLogicController": adapter_type_dict[device.key] = "lsiLogic" elif device.__class__.__name__ == "VirtualBusLogicController": adapter_type_dict[device.key] = "busLogic" elif device.__class__.__name__ == "VirtualIDEController": adapter_type_dict[device.key] = "ide" elif device.__class__.__name__ == "VirtualLsiLogicSASController": adapter_type_dict[device.key] = "lsiLogicsas" adapter_type = adapter_type_dict.get(vmdk_controller_key, "") return (vmdk_file_path, adapter_type, disk_type) def _find_controller_slot(controller_keys, taken, max_unit_number): for controller_key in controller_keys: for unit_number in range(max_unit_number): if not unit_number in taken.get(controller_key, []): return controller_key, unit_number def _is_ide_controller(device): return device.__class__.__name__ == 'VirtualIDEController' def _is_scsi_controller(device): return device.__class__.__name__ in ['VirtualLsiLogicController', 'VirtualLsiLogicSASController', 'VirtualBusLogicController'] def _find_allocated_slots(devices): """Return dictionary which maps controller_key to list of allocated unit numbers for that controller_key. """ taken = {} for device in devices: if hasattr(device, 'controllerKey') and hasattr(device, 'unitNumber'): unit_numbers = taken.setdefault(device.controllerKey, []) unit_numbers.append(device.unitNumber) if _is_scsi_controller(device): # the SCSI controller sits on its own bus unit_numbers = taken.setdefault(device.key, []) unit_numbers.append(device.scsiCtlrUnitNumber) return taken def allocate_controller_key_and_unit_number(client_factory, devices, adapter_type): """This function inspects the current set of hardware devices and returns controller_key and unit_number that can be used for attaching a new virtual disk to adapter with the given adapter_type. 
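
    An illustrative call (names are only examples):

        key, unit_number, spec = allocate_controller_key_and_unit_number(
            client_factory, devices, 'lsiLogic')

    A negative controller key in the result indicates that a new controller
    spec was created and must be included in the VM's deviceChange list.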
""" if devices.__class__.__name__ == "ArrayOfVirtualDevice": devices = devices.VirtualDevice taken = _find_allocated_slots(devices) ret = None if adapter_type == 'ide': ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)] ret = _find_controller_slot(ide_keys, taken, 2) elif adapter_type in ['lsiLogic', 'lsiLogicsas', 'busLogic']: scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)] ret = _find_controller_slot(scsi_keys, taken, 16) if ret: return ret[0], ret[1], None # create new controller with the specified type and return its spec controller_key = -101 controller_spec = create_controller_spec(client_factory, controller_key, adapter_type) return controller_key, 0, controller_spec def get_rdm_disk(hardware_devices, uuid): """Gets the RDM disk key.""" if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice": hardware_devices = hardware_devices.VirtualDevice for device in hardware_devices: if (device.__class__.__name__ == "VirtualDisk" and device.backing.__class__.__name__ == "VirtualDiskRawDiskMappingVer1BackingInfo" and device.backing.lunUuid == uuid): return device def get_copy_virtual_disk_spec(client_factory, adapter_type="lsiLogic", disk_type="preallocated"): """Builds the Virtual Disk copy spec.""" dest_spec = client_factory.create('ns0:VirtualDiskSpec') dest_spec.adapterType = get_vmdk_adapter_type(adapter_type) dest_spec.diskType = disk_type return dest_spec def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic", disk_type="preallocated"): """Builds the virtual disk create spec.""" create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec') create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type) create_vmdk_spec.diskType = disk_type create_vmdk_spec.capacityKb = size_in_kb return create_vmdk_spec def get_rdm_create_spec(client_factory, device, adapter_type="lsiLogic", disk_type="rdmp"): """Builds the RDM virtual disk create spec.""" create_vmdk_spec = client_factory.create('ns0:DeviceBackedVirtualDiskSpec') create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type) create_vmdk_spec.diskType = disk_type create_vmdk_spec.device = device return create_vmdk_spec def create_virtual_cdrom_spec(client_factory, datastore, controller_key, file_path, cdrom_unit_number): """Builds spec for the creation of a new Virtual CDROM to the VM.""" config_spec = client_factory.create( 'ns0:VirtualDeviceConfigSpec') config_spec.operation = "add" cdrom = client_factory.create('ns0:VirtualCdrom') cdrom_device_backing = client_factory.create( 'ns0:VirtualCdromIsoBackingInfo') cdrom_device_backing.datastore = datastore cdrom_device_backing.fileName = file_path cdrom.backing = cdrom_device_backing cdrom.controllerKey = controller_key cdrom.unitNumber = cdrom_unit_number cdrom.key = -1 connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo') connectable_spec.startConnected = True connectable_spec.allowGuestControl = False connectable_spec.connected = True cdrom.connectable = connectable_spec config_spec.device = cdrom return config_spec def create_virtual_disk_spec(client_factory, controller_key, disk_type="preallocated", file_path=None, disk_size=None, linked_clone=False, unit_number=None, device_name=None): """Builds spec for the creation of a new/ attaching of an already existing Virtual Disk to the VM. 
""" virtual_device_config = client_factory.create( 'ns0:VirtualDeviceConfigSpec') virtual_device_config.operation = "add" if (file_path is None) or linked_clone: virtual_device_config.fileOperation = "create" virtual_disk = client_factory.create('ns0:VirtualDisk') if disk_type == "rdm" or disk_type == "rdmp": disk_file_backing = client_factory.create( 'ns0:VirtualDiskRawDiskMappingVer1BackingInfo') disk_file_backing.compatibilityMode = "virtualMode" \ if disk_type == "rdm" else "physicalMode" disk_file_backing.diskMode = "independent_persistent" disk_file_backing.deviceName = device_name or "" else: disk_file_backing = client_factory.create( 'ns0:VirtualDiskFlatVer2BackingInfo') disk_file_backing.diskMode = "persistent" if disk_type == "thin": disk_file_backing.thinProvisioned = True else: if disk_type == "eagerZeroedThick": disk_file_backing.eagerlyScrub = True disk_file_backing.fileName = file_path or "" connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo') connectable_spec.startConnected = True connectable_spec.allowGuestControl = False connectable_spec.connected = True if not linked_clone: virtual_disk.backing = disk_file_backing else: virtual_disk.backing = copy.copy(disk_file_backing) virtual_disk.backing.fileName = "" virtual_disk.backing.parent = disk_file_backing virtual_disk.connectable = connectable_spec # The Server assigns a Key to the device. Here we pass a -ve random key. # -ve because actual keys are +ve numbers and we don't # want a clash with the key that server might associate with the device virtual_disk.key = -100 virtual_disk.controllerKey = controller_key virtual_disk.unitNumber = unit_number or 0 virtual_disk.capacityInKB = disk_size or 0 virtual_device_config.device = virtual_disk return virtual_device_config def detach_virtual_disk_spec(client_factory, device, destroy_disk=False): """Builds spec for the detach of an already existing Virtual Disk from VM. 
""" virtual_device_config = client_factory.create( 'ns0:VirtualDeviceConfigSpec') virtual_device_config.operation = "remove" if destroy_disk: virtual_device_config.fileOperation = "destroy" virtual_device_config.device = device return virtual_device_config def clone_vm_spec(client_factory, location, power_on=False, snapshot=None, template=False, config=None): """Builds the VM clone spec.""" clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec') clone_spec.location = location clone_spec.powerOn = power_on if snapshot: clone_spec.snapshot = snapshot if config is not None: clone_spec.config = config clone_spec.template = template return clone_spec def relocate_vm_spec(client_factory, datastore=None, host=None, disk_move_type="moveAllDiskBackingsAndAllowSharing"): """Builds the VM relocation spec.""" rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec') rel_spec.datastore = datastore rel_spec.diskMoveType = disk_move_type if host: rel_spec.host = host return rel_spec def get_dummy_vm_create_spec(client_factory, name, data_store_name): """Builds the dummy VM create spec.""" config_spec = client_factory.create('ns0:VirtualMachineConfigSpec') config_spec.name = name config_spec.guestId = "otherGuest" vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo') vm_file_info.vmPathName = "[" + data_store_name + "]" config_spec.files = vm_file_info tools_info = client_factory.create('ns0:ToolsConfigInfo') tools_info.afterPowerOn = True tools_info.afterResume = True tools_info.beforeGuestStandby = True tools_info.beforeGuestShutdown = True tools_info.beforeGuestReboot = True config_spec.tools = tools_info config_spec.numCPUs = 1 config_spec.memoryMB = 4 controller_key = -101 controller_spec = create_controller_spec(client_factory, controller_key) disk_spec = create_virtual_disk_spec(client_factory, 1024, controller_key) device_config_spec = [controller_spec, disk_spec] config_spec.deviceChange = device_config_spec return config_spec def get_machine_id_change_spec(client_factory, machine_id_str): """Builds the machine id change config spec.""" virtual_machine_config_spec = client_factory.create( 'ns0:VirtualMachineConfigSpec') opt = client_factory.create('ns0:OptionValue') opt.key = "machine.id" opt.value = machine_id_str virtual_machine_config_spec.extraConfig = [opt] return virtual_machine_config_spec def get_add_vswitch_port_group_spec(client_factory, vswitch_name, port_group_name, vlan_id): """Builds the virtual switch port group add spec.""" vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec') vswitch_port_group_spec.name = port_group_name vswitch_port_group_spec.vswitchName = vswitch_name # VLAN ID of 0 means that VLAN tagging is not to be done for the network. 
vswitch_port_group_spec.vlanId = int(vlan_id) policy = client_factory.create('ns0:HostNetworkPolicy') nicteaming = client_factory.create('ns0:HostNicTeamingPolicy') nicteaming.notifySwitches = True policy.nicTeaming = nicteaming vswitch_port_group_spec.policy = policy return vswitch_port_group_spec def get_vnc_config_spec(client_factory, port): """Builds the vnc config spec.""" virtual_machine_config_spec = client_factory.create( 'ns0:VirtualMachineConfigSpec') opt_enabled = client_factory.create('ns0:OptionValue') opt_enabled.key = "RemoteDisplay.vnc.enabled" opt_enabled.value = "true" opt_port = client_factory.create('ns0:OptionValue') opt_port.key = "RemoteDisplay.vnc.port" opt_port.value = port extras = [opt_enabled, opt_port] virtual_machine_config_spec.extraConfig = extras return virtual_machine_config_spec def get_vnc_port(session): """Return VNC port for an VM or None if there is no available port.""" min_port = CONF.vmware.vnc_port port_total = CONF.vmware.vnc_port_total allocated_ports = _get_allocated_vnc_ports(session) max_port = min_port + port_total for port in range(min_port, max_port): if port not in allocated_ports: return port raise exception.ConsolePortRangeExhausted(min_port=min_port, max_port=max_port) def _get_allocated_vnc_ports(session): """Return an integer set of all allocated VNC ports.""" # TODO(rgerganov): bug #1256944 # The VNC port should be unique per host, not per vCenter vnc_ports = set() result = session._call_method(vim_util, "get_objects", "VirtualMachine", [VNC_CONFIG_KEY]) while result: for obj in result.objects: if not hasattr(obj, 'propSet'): continue dynamic_prop = obj.propSet[0] option_value = dynamic_prop.val vnc_port = option_value.value vnc_ports.add(int(vnc_port)) token = _get_token(result) if token: result = session._call_method(vim_util, "continue_to_get_objects", token) else: break return vnc_ports def search_datastore_spec(client_factory, file_name): """Builds the datastore search spec.""" search_spec = client_factory.create('ns0:HostDatastoreBrowserSearchSpec') search_spec.matchPattern = [file_name] return search_spec def _get_token(results): """Get the token from the property results.""" return getattr(results, 'token', None) def _get_reference_for_value(results, value): for object in results.objects: if object.obj.value == value: return object def _get_object_for_value(results, value): for object in results.objects: if object.propSet[0].val == value: return object.obj def _get_object_for_optionvalue(results, value): for object in results.objects: if hasattr(object, "propSet") and object.propSet: if object.propSet[0].val.value == value: return object.obj def _get_object_from_results(session, results, value, func): while results: token = _get_token(results) object = func(results, value) if object: if token: session._call_method(vim_util, "cancel_retrieve", token) return object if token: results = session._call_method(vim_util, "continue_to_get_objects", token) else: return None def _cancel_retrieve_if_necessary(session, results): token = _get_token(results) if token: results = session._call_method(vim_util, "cancel_retrieve", token) def _get_vm_ref_from_name(session, vm_name): """Get reference to the VM with the name specified.""" vms = session._call_method(vim_util, "get_objects", "VirtualMachine", ["name"]) return _get_object_from_results(session, vms, vm_name, _get_object_for_value) @vm_ref_cache_from_name def get_vm_ref_from_name(session, vm_name): return (_get_vm_ref_from_vm_uuid(session, vm_name) or _get_vm_ref_from_name(session, 
                                  vm_name))


def _get_vm_ref_from_uuid(session, instance_uuid):
    """Get reference to the VM with the uuid specified.

    This method reads the names of all VMs running on the backend, then
    filters locally for the matching instance_uuid. It is far more
    efficient to use _get_vm_ref_from_vm_uuid.
    """
    vms = session._call_method(vim_util, "get_objects",
                               "VirtualMachine", ["name"])
    return _get_object_from_results(session, vms, instance_uuid,
                                    _get_object_for_value)


def _get_vm_ref_from_vm_uuid(session, instance_uuid):
    """Get reference to the VM.

    The method will make use of FindAllByUuid to get the VM reference.
    This method finds all VMs on the backend that match the instance_uuid,
    more specifically all VMs on the backend that have
    'config_spec.instanceUuid' set to 'instance_uuid'.
    """
    vm_refs = session._call_method(
        session._get_vim(),
        "FindAllByUuid",
        session._get_vim().get_service_content().searchIndex,
        uuid=instance_uuid,
        vmSearch=True,
        instanceUuid=True)
    if vm_refs:
        return vm_refs[0]


def _get_vm_ref_from_extraconfig(session, instance_uuid):
    """Get reference to the VM with the uuid specified."""
    vms = session._call_method(vim_util, "get_objects",
                               "VirtualMachine",
                               ['config.extraConfig["nvp.vm-uuid"]'])
    return _get_object_from_results(session, vms, instance_uuid,
                                    _get_object_for_optionvalue)


@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
    """Get reference to the VM through uuid or vm name."""
    uuid = instance['uuid']
    vm_ref = (search_vm_ref_by_identifier(session, uuid) or
              _get_vm_ref_from_name(session, instance['name']))
    if vm_ref is None:
        raise exception.InstanceNotFound(instance_id=uuid)
    return vm_ref


def search_vm_ref_by_identifier(session, identifier):
    """Searches VM reference using the identifier.

    This method is primarily meant to separate out part of the logic for
    vm_ref search that could be used directly in the special case of
    migrating the instance. For querying a VM linked to an instance, always
    use get_vm_ref instead.
    """
    vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
              _get_vm_ref_from_extraconfig(session, identifier) or
              _get_vm_ref_from_uuid(session, identifier))
    return vm_ref


def get_host_ref_from_id(session, host_id, property_list=None):
    """Get a host reference object for a host_id string."""
    if property_list is None:
        property_list = ['name']
    host_refs = session._call_method(
        vim_util, "get_objects", "HostSystem", property_list)
    return _get_object_from_results(session, host_refs, host_id,
                                    _get_reference_for_value)


def get_host_id_from_vm_ref(session, vm_ref):
    """This method allows you to find the managed object
    ID of the host running a VM. Since vMotion can
    change the value, you should not presume that this
    is a value that you can cache for very long and
    should be prepared to allow for it to change.

    :param session: a vSphere API connection
    :param vm_ref: a reference object to the running VM
    :return: the host_id running the virtual machine
    """
    # to prevent typographical errors below
    property_name = 'runtime.host'
    # a property collector in VMware vSphere Management API
    # is a set of local representations of remote values.
    # property_set here is a local representation of the
    # properties we are querying for.
    property_set = session._call_method(
        vim_util, "get_object_properties",
        None, vm_ref, vm_ref._type, [property_name])
    prop = property_from_property_set(
        property_name, property_set)
    if prop is not None:
        prop = prop.val.value
    else:
        # reaching here represents an impossible state
        raise RuntimeError(
            "Virtual Machine %s exists without a runtime.host!"
% (vm_ref)) return prop def property_from_property_set(property_name, property_set): '''Use this method to filter property collector results. Because network traffic is expensive, multiple VMwareAPI calls will sometimes pile-up properties to be collected. That means results may contain many different values for multiple purposes. This helper will filter a list for a single result and filter the properties of that result to find the single value of whatever type resides in that result. This could be a ManagedObjectReference ID or a complex value. :param property_name: name of property you want :param property_set: all results from query :return: the value of the property. ''' for prop in property_set.objects: p = _property_from_propSet(prop.propSet, property_name) if p is not None: return p def _property_from_propSet(propSet, name='name'): for p in propSet: if p.name == name: return p def get_host_ref_for_vm(session, instance, props): """Get the ESXi host running a VM by its name.""" vm_ref = get_vm_ref(session, instance) host_id = get_host_id_from_vm_ref(session, vm_ref) return get_host_ref_from_id(session, host_id, props) def get_host_name_for_vm(session, instance): """Get the ESXi host running a VM by its name.""" host_ref = get_host_ref_for_vm(session, instance, ['name']) return get_host_name_from_host_ref(host_ref) def get_host_name_from_host_ref(host_ref): p = _property_from_propSet(host_ref.propSet) if p is not None: return p.val def get_vm_state_from_name(session, vm_name): vm_ref = get_vm_ref_from_name(session, vm_name) vm_state = session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "runtime.powerState") return vm_state def get_stats_from_cluster(session, cluster): """Get the aggregate resource stats of a cluster.""" cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []} mem_info = {'total': 0, 'free': 0} # Get the Host and Resource Pool Managed Object Refs prop_dict = session._call_method(vim_util, "get_dynamic_properties", cluster, "ClusterComputeResource", ["host", "resourcePool"]) if prop_dict: host_ret = prop_dict.get('host') if host_ret: host_mors = host_ret.ManagedObjectReference result = session._call_method(vim_util, "get_properties_for_a_collection_of_objects", "HostSystem", host_mors, ["summary.hardware", "summary.runtime"]) for obj in result.objects: hardware_summary = obj.propSet[0].val runtime_summary = obj.propSet[1].val if (runtime_summary.inMaintenanceMode == False and runtime_summary.connectionState == "connected"): # Total vcpus is the sum of all pCPUs of individual hosts # The overcommitment ratio is factored in by the scheduler cpu_info['vcpus'] += hardware_summary.numCpuThreads cpu_info['cores'] += hardware_summary.numCpuCores cpu_info['vendor'].append(hardware_summary.vendor) cpu_info['model'].append(hardware_summary.cpuModel) res_mor = prop_dict.get('resourcePool') if res_mor: res_usage = session._call_method(vim_util, "get_dynamic_property", res_mor, "ResourcePool", "summary.runtime.memory") if res_usage: # maxUsage is the memory limit of the cluster available to VM's mem_info['total'] = int(res_usage.maxUsage / units.Mi) # overallUsage is the hypervisor's view of memory usage by VM's consumed = int(res_usage.overallUsage / units.Mi) mem_info['free'] = mem_info['total'] - consumed stats = {'cpu': cpu_info, 'mem': mem_info} return stats def get_cluster_ref_from_name(session, cluster_name): """Get reference to the cluster with the name specified.""" cls = session._call_method(vim_util, "get_objects", 
"ClusterComputeResource", ["name"]) return _get_object_from_results(session, cls, cluster_name, _get_object_for_value) def get_host_ref(session, cluster=None): """Get reference to a host within the cluster specified.""" if cluster is None: results = session._call_method(vim_util, "get_objects", "HostSystem") _cancel_retrieve_if_necessary(session, results) host_mor = results.objects[0].obj else: host_ret = session._call_method(vim_util, "get_dynamic_property", cluster, "ClusterComputeResource", "host") if not host_ret or not host_ret.ManagedObjectReference: msg = _('No host available on cluster') raise exception.NoValidHost(reason=msg) host_mor = host_ret.ManagedObjectReference[0] return host_mor def propset_dict(propset): """Turn a propset list into a dictionary PropSet is an optional attribute on ObjectContent objects that are returned by the VMware API. You can read more about these at: http://pubs.vmware.com/vsphere-51/index.jsp #com.vmware.wssdk.apiref.doc/ vmodl.query.PropertyCollector.ObjectContent.html :param propset: a property "set" from ObjectContent :return: dictionary representing property set """ if propset is None: return {} #TODO(hartsocks): once support for Python 2.6 is dropped # change to {[(prop.name, prop.val) for prop in propset]} return dict([(prop.name, prop.val) for prop in propset]) def _select_datastore(data_stores, best_match, datastore_regex=None): """Find the most preferable datastore in a given RetrieveResult object. :param data_stores: a RetrieveResult object from vSphere API call :param best_match: the current best match for datastore :param datastore_regex: an optional regular expression to match names :return: datastore_ref, datastore_name, capacity, freespace """ # data_stores is actually a RetrieveResult object from vSphere API call for obj_content in data_stores.objects: # the propset attribute "need not be set" by returning API if not hasattr(obj_content, 'propSet'): continue propdict = propset_dict(obj_content.propSet) # Local storage identifier vSphere doesn't support CIFS or # vfat for datastores, therefore filtered ds_type = propdict['summary.type'] ds_name = propdict['summary.name'] if ((ds_type == 'VMFS' or ds_type == 'NFS') and propdict.get('summary.accessible')): if datastore_regex is None or datastore_regex.match(ds_name): new_ds = DSRecord( datastore=obj_content.obj, name=ds_name, capacity=propdict['summary.capacity'], freespace=propdict['summary.freeSpace']) # favor datastores with more free space if new_ds.freespace > best_match.freespace: best_match = new_ds return best_match def get_datastore_ref_and_name(session, cluster=None, host=None, datastore_regex=None): """Get the datastore list and choose the most preferable one.""" if cluster is None and host is None: data_stores = session._call_method(vim_util, "get_objects", "Datastore", ["summary.type", "summary.name", "summary.capacity", "summary.freeSpace", "summary.accessible"]) else: if cluster is not None: datastore_ret = session._call_method( vim_util, "get_dynamic_property", cluster, "ClusterComputeResource", "datastore") else: datastore_ret = session._call_method( vim_util, "get_dynamic_property", host, "HostSystem", "datastore") if not datastore_ret: raise exception.DatastoreNotFound() data_store_mors = datastore_ret.ManagedObjectReference data_stores = session._call_method(vim_util, "get_properties_for_a_collection_of_objects", "Datastore", data_store_mors, ["summary.type", "summary.name", "summary.capacity", "summary.freeSpace", "summary.accessible"]) best_match = 
DSRecord(datastore=None, name=None,
                          capacity=None, freespace=0)
    while data_stores:
        best_match = _select_datastore(data_stores, best_match,
                                       datastore_regex)
        token = _get_token(data_stores)
        if not token:
            break
        data_stores = session._call_method(vim_util,
                                           "continue_to_get_objects",
                                           token)
    if best_match.datastore:
        return best_match
    if datastore_regex:
        raise exception.DatastoreNotFound(
            _("Datastore regex %s did not match any datastores")
            % datastore_regex.pattern)
    else:
        raise exception.DatastoreNotFound()


def _get_allowed_datastores(data_stores, datastore_regex, allowed_types):
    allowed = []
    for obj_content in data_stores.objects:
        # the propset attribute "need not be set" by returning API
        if not hasattr(obj_content, 'propSet'):
            continue
        propdict = propset_dict(obj_content.propSet)
        # vSphere doesn't support CIFS or vfat for datastores,
        # so those types are filtered out.
        ds_type = propdict['summary.type']
        ds_name = propdict['summary.name']
        if (propdict['summary.accessible'] and ds_type in allowed_types):
            if datastore_regex is None or datastore_regex.match(ds_name):
                allowed.append({'ref': obj_content.obj, 'name': ds_name})
    return allowed


def get_available_datastores(session, cluster=None, datastore_regex=None):
    """Return the accessible datastores of the allowed types (VMFS/NFS),
    optionally filtered by datastore_regex.
    """
    if cluster:
        mobj = cluster
        type = "ClusterComputeResource"
    else:
        mobj = get_host_ref(session)
        type = "HostSystem"
    ds = session._call_method(vim_util, "get_dynamic_property", mobj,
                              type, "datastore")
    if not ds:
        return []
    data_store_mors = ds.ManagedObjectReference
    # NOTE(garyk): use utility method to retrieve remote objects
    data_stores = session._call_method(vim_util,
            "get_properties_for_a_collection_of_objects",
            "Datastore", data_store_mors,
            ["summary.type", "summary.name", "summary.accessible"])
    allowed = []
    while data_stores:
        allowed.extend(_get_allowed_datastores(data_stores, datastore_regex,
                                               ['VMFS', 'NFS']))
        token = _get_token(data_stores)
        if not token:
            break
        data_stores = session._call_method(vim_util,
                                           "continue_to_get_objects",
                                           token)
    return allowed


def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    for device in hardware_devices:
        if (device.__class__.__name__ == "VirtualDisk" and
                device.backing.__class__.__name__ ==
                "VirtualDiskFlatVer2BackingInfo" and
                volume_uuid in device.backing.fileName):
            return device.backing.uuid


def get_vmdk_backed_disk_device(hardware_devices, uuid):
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    for device in hardware_devices:
        if (device.__class__.__name__ == "VirtualDisk" and
                device.backing.__class__.__name__ ==
                "VirtualDiskFlatVer2BackingInfo" and
                device.backing.uuid == uuid):
            return device


def get_vmdk_volume_disk(hardware_devices, path=None):
    if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
        hardware_devices = hardware_devices.VirtualDevice

    for device in hardware_devices:
        if (device.__class__.__name__ == "VirtualDisk"):
            if not path or path == device.backing.fileName:
                return device


def get_res_pool_ref(session, cluster, node_mo_id):
    """Get the resource pool."""
    if cluster is None:
        # With no cluster named, use the root resource pool.
        results = session._call_method(vim_util, "get_objects",
                                       "ResourcePool")
        _cancel_retrieve_if_necessary(session, results)
        # The 0th resource pool is always the root resource pool on both ESX
        # and vCenter.
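        # (Every ComputeResource - standalone host or cluster - owns exactly
        # one root resource pool, so taking the first result is safe.)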
res_pool_ref = results.objects[0].obj else: if cluster.value == node_mo_id: # Get the root resource pool of the cluster res_pool_ref = session._call_method(vim_util, "get_dynamic_property", cluster, "ClusterComputeResource", "resourcePool") return res_pool_ref def get_all_cluster_mors(session): """Get all the clusters in the vCenter.""" try: results = session._call_method(vim_util, "get_objects", "ClusterComputeResource", ["name"]) _cancel_retrieve_if_necessary(session, results) return results.objects except Exception as excep: LOG.warn(_("Failed to get cluster references %s") % excep) def get_all_res_pool_mors(session): """Get all the resource pools in the vCenter.""" try: results = session._call_method(vim_util, "get_objects", "ResourcePool") _cancel_retrieve_if_necessary(session, results) return results.objects except Exception as excep: LOG.warn(_("Failed to get resource pool references " "%s") % excep) def get_dynamic_property_mor(session, mor_ref, attribute): """Get the value of an attribute for a given managed object.""" return session._call_method(vim_util, "get_dynamic_property", mor_ref, mor_ref._type, attribute) def find_entity_mor(entity_list, entity_name): """Returns managed object ref for given cluster or resource pool name.""" return [mor for mor in entity_list if (hasattr(mor, 'propSet') and mor.propSet[0].val == entity_name)] def get_all_cluster_refs_by_name(session, path_list): """Get reference to the Cluster, ResourcePool with the path specified. The path is the display name. This can be the full path as well. The input will have the list of clusters and resource pool names """ cls = get_all_cluster_mors(session) if not cls: return res = get_all_res_pool_mors(session) if not res: return path_list = [path.strip() for path in path_list] list_obj = [] for entity_path in path_list: # entity_path could be unique cluster and/or resource-pool name res_mor = find_entity_mor(res, entity_path) cls_mor = find_entity_mor(cls, entity_path) cls_mor.extend(res_mor) for mor in cls_mor: list_obj.append((mor.obj, mor.propSet[0].val)) return get_dict_mor(session, list_obj) def get_dict_mor(session, list_obj): """The input is a list of objects in the form (manage_object,display_name) The managed object will be in the form { value = "domain-1002", _type = "ClusterComputeResource" } Output data format: dict_mors = { 'respool-1001': { 'cluster_mor': clusterMor, 'res_pool_mor': resourcePoolMor, 'name': display_name }, 'domain-1002': { 'cluster_mor': clusterMor, 'res_pool_mor': resourcePoolMor, 'name': display_name }, } """ dict_mors = {} for obj_ref, path in list_obj: if obj_ref._type == "ResourcePool": # Get owner cluster-ref mor cluster_ref = get_dynamic_property_mor(session, obj_ref, "owner") dict_mors[obj_ref.value] = {'cluster_mor': cluster_ref, 'res_pool_mor': obj_ref, 'name': path, } else: # Get default resource pool of the cluster res_pool_ref = get_dynamic_property_mor(session, obj_ref, "resourcePool") dict_mors[obj_ref.value] = {'cluster_mor': obj_ref, 'res_pool_mor': res_pool_ref, 'name': path, } return dict_mors def get_mo_id_from_instance(instance): """Return the managed object ID from the instance. The instance['node'] will have the hypervisor_hostname field of the compute node on which the instance exists or will be provisioned. This will be of the form 'respool-1001(MyResPoolName)' 'domain-1001(MyClusterName)' """ return instance['node'].partition('(')[0] def get_vmdk_adapter_type(adapter_type): """Return the adapter type to be used in vmdk descriptor. 
Adapter type in vmdk descriptor is same for LSI-SAS & LSILogic because Virtual Disk Manager API does not recognize the newer controller types. """ if adapter_type == "lsiLogicsas": vmdk_adapter_type = "lsiLogic" else: vmdk_adapter_type = adapter_type return vmdk_adapter_type def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref, vmfolder_ref): """Clone VM and link the cloned VM to the instance. Clones the passed vm_ref into a new VM and links the cloned vm to the passed instance. """ if vm_ref is None: LOG.warn(_("vmwareapi:vm_util:clone_vmref_for_instance, called " "with vm_ref=None")) raise error_util.MissingParameter(param="vm_ref") # Get the clone vm spec client_factory = session._get_vim().client.factory rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref) extra_opts = {'nvp.vm-uuid': instance['uuid']} config_spec = get_vm_extra_config_spec(client_factory, extra_opts) config_spec.instanceUuid = instance['uuid'] clone_spec = clone_vm_spec(client_factory, rel_spec, config=config_spec) # Clone VM on ESX host LOG.debug(_("Cloning VM for instance %s"), instance['uuid'], instance=instance) vm_clone_task = session._call_method(session._get_vim(), "CloneVM_Task", vm_ref, folder=vmfolder_ref, name=instance['uuid'], spec=clone_spec) session._wait_for_task(vm_clone_task) LOG.debug(_("Cloned VM for instance %s"), instance['uuid'], instance=instance) # Invalidate the cache, so that it is refetched the next time vm_ref_cache_delete(instance['uuid']) def disassociate_vmref_from_instance(session, instance, vm_ref=None, suffix='-orig'): """Disassociates the VM linked to the instance. Disassociates the VM linked to the instance by performing the following 1. Update the extraConfig property for nvp.vm-uuid to be replaced with instance[uuid]+suffix 2. Rename the VM to be instance[uuid]+suffix instead 3. Reset the instanceUUID of the VM to a new generated value """ if vm_ref is None: vm_ref = get_vm_ref(session, instance) extra_opts = {'nvp.vm-uuid': instance['uuid'] + suffix} client_factory = session._get_vim().client.factory reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts) reconfig_spec.name = instance['uuid'] + suffix reconfig_spec.instanceUuid = '' LOG.debug(_("Disassociating VM from instance %s"), instance['uuid'], instance=instance) reconfig_task = session._call_method(session._get_vim(), "ReconfigVM_Task", vm_ref, spec=reconfig_spec) session._wait_for_task(reconfig_task) LOG.debug(_("Disassociated VM from instance %s"), instance['uuid'], instance=instance) # Invalidate the cache, so that it is refetched the next time vm_ref_cache_delete(instance['uuid']) def associate_vmref_for_instance(session, instance, vm_ref=None, suffix='-orig'): """Associates the VM to the instance. Associates the VM to the instance by performing the following 1. Update the extraConfig property for nvp.vm-uuid to be replaced with instance[uuid] 2. Rename the VM to be instance[uuid] 3. 
Reset the instanceUUID of the VM to be instance[uuid] """ if vm_ref is None: vm_ref = search_vm_ref_by_identifier(session, instance['uuid'] + suffix) if vm_ref is None: raise exception.InstanceNotFound(instance_id=instance['uuid'] + suffix) extra_opts = {'nvp.vm-uuid': instance['uuid']} client_factory = session._get_vim().client.factory reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts) reconfig_spec.name = instance['uuid'] reconfig_spec.instanceUuid = instance['uuid'] LOG.debug(_("Associating VM to instance %s"), instance['uuid'], instance=instance) reconfig_task = session._call_method(session._get_vim(), "ReconfigVM_Task", vm_ref, spec=reconfig_spec) session._wait_for_task(reconfig_task) LOG.debug(_("Associated VM to instance %s"), instance['uuid'], instance=instance) # Invalidate the cache, so that it is refetched the next time vm_ref_cache_delete(instance['uuid']) nova-2014.1.5/nova/virt/vmwareapi/driver.py0000664000567000056700000012152112540642544021646 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A connection to the VMware ESX/vCenter platform. """ import re import time from eventlet import event from oslo.config import cfg import suds from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import uuidutils from nova.virt import driver from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import host from nova.virt.vmwareapi import vim from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import vmops from nova.virt.vmwareapi import volumeops LOG = logging.getLogger(__name__) vmwareapi_opts = [ cfg.StrOpt('host_ip', help='Hostname or IP address for connection to VMware ESX/VC ' 'host.'), cfg.StrOpt('host_username', help='Username for connection to VMware ESX/VC host.'), cfg.StrOpt('host_password', help='Password for connection to VMware ESX/VC host.', secret=True), cfg.MultiStrOpt('cluster_name', help='Name of a VMware Cluster ComputeResource. 
Used only if ' 'compute_driver is vmwareapi.VMwareVCDriver.'), cfg.StrOpt('datastore_regex', help='Regex to match the name of a datastore.'), cfg.FloatOpt('task_poll_interval', default=0.5, help='The interval used for polling of remote tasks.'), cfg.IntOpt('api_retry_count', default=10, help='The number of times we retry on failures, e.g., ' 'socket error, etc.'), cfg.IntOpt('vnc_port', default=5900, help='VNC starting port'), cfg.IntOpt('vnc_port_total', default=10000, help='Total number of VNC ports'), cfg.BoolOpt('use_linked_clone', default=True, help='Whether to use linked clone'), ] CONF = cfg.CONF CONF.register_opts(vmwareapi_opts, 'vmware') TIME_BETWEEN_API_CALL_RETRIES = 1.0 class VMwareESXDriver(driver.ComputeDriver): """The ESX host connection object.""" capabilities = { "has_imagecache": True, "supports_recreate": False, } # VMwareAPI has both ESXi and vCenter API sets. # The ESXi API are a proper sub-set of the vCenter API. # That is to say, nearly all valid ESXi calls are # valid vCenter calls. There are some small edge-case # exceptions regarding VNC, CIM, User management & SSO. def _do_deprecation_warning(self): LOG.warning(_('The VMware ESX driver is now deprecated and will be ' 'removed in the Juno release. The VC driver will remain ' 'and continue to be supported.')) def __init__(self, virtapi, read_only=False, scheme="https"): super(VMwareESXDriver, self).__init__(virtapi) self._do_deprecation_warning() self._host_ip = CONF.vmware.host_ip if not (self._host_ip or CONF.vmware.host_username is None or CONF.vmware.host_password is None): raise Exception(_("Must specify host_ip, " "host_username " "and host_password to use " "compute_driver=vmwareapi.VMwareESXDriver or " "vmwareapi.VMwareVCDriver")) self._datastore_regex = None if CONF.vmware.datastore_regex: try: self._datastore_regex = re.compile(CONF.vmware.datastore_regex) except re.error: raise exception.InvalidInput(reason= _("Invalid Regular Expression %s") % CONF.vmware.datastore_regex) self._session = VMwareAPISession(scheme=scheme) self._volumeops = volumeops.VMwareVolumeOps(self._session) self._vmops = vmops.VMwareVMOps(self._session, self.virtapi, self._volumeops, datastore_regex=self._datastore_regex) self._host = host.Host(self._session) self._host_state = None #TODO(hartsocks): back-off into a configuration test module. if CONF.vmware.use_linked_clone is None: raise error_util.UseLinkedCloneConfigurationFault() @property def host_state(self): if not self._host_state: self._host_state = host.HostState(self._session, self._host_ip) return self._host_state def init_host(self, host): vim = self._session.vim if vim is None: self._session._create_session() def cleanup_host(self, host): # NOTE(hartsocks): we lean on the init_host to force the vim object # to not be None. 
        vim = self._session.vim
        service_content = vim.get_service_content()
        session_manager = service_content.sessionManager
        try:
            vim.client.service.Logout(session_manager)
        except suds.WebFault:
            LOG.debug(_("No vSphere session was open during cleanup_host."))

    def list_instances(self):
        """List VM instances."""
        return self._vmops.list_instances()

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        """Create VM instance."""
        self._vmops.spawn(context, instance, image_meta, injected_files,
                          admin_password, network_info, block_device_info)

    def snapshot(self, context, instance, name, update_task_state):
        """Create snapshot from a running VM instance."""
        self._vmops.snapshot(context, instance, name, update_task_state)

    def reboot(self, context, instance, network_info, reboot_type,
               block_device_info=None, bad_volumes_callback=None):
        """Reboot VM instance."""
        self._vmops.reboot(instance, network_info)

    def destroy(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Destroy VM instance."""
        # Destroy gets triggered when Resource Claim in resource_tracker
        # is not successful. When resource claim is not successful,
        # node is not set in instance. Perform destroy only if node is set
        if not instance['node']:
            return
        self._vmops.destroy(instance, network_info, destroy_disks)

    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True):
        """Cleanup after instance being destroyed by Hypervisor."""
        pass

    def pause(self, instance):
        """Pause VM instance."""
        self._vmops.pause(instance)

    def unpause(self, instance):
        """Unpause paused VM instance."""
        self._vmops.unpause(instance)

    def suspend(self, instance):
        """Suspend the specified instance."""
        self._vmops.suspend(instance)

    def resume(self, context, instance, network_info, block_device_info=None):
        """Resume the suspended VM instance."""
        self._vmops.resume(instance)

    def rescue(self, context, instance, network_info, image_meta,
               rescue_password):
        """Rescue the specified instance."""
        self._vmops.rescue(context, instance, network_info, image_meta)

    def unrescue(self, instance, network_info):
        """Unrescue the specified instance."""
        self._vmops.unrescue(instance)

    def power_off(self, instance):
        """Power off the specified instance."""
        self._vmops.power_off(instance)

    def power_on(self, context, instance, network_info,
                 block_device_info=None):
        """Power on the specified instance."""
        self._vmops._power_on(instance)

    def resume_state_on_host_boot(self, context, instance, network_info,
                                  block_device_info=None):
        """Resume guest state when a host is booted."""
        # Check if the instance is running already and avoid doing
        # anything if it is.
        instances = self.list_instances()
        if instance['uuid'] not in instances:
            LOG.warn(_('Instance cannot be found in host, or in an unknown '
                       'state.'), instance=instance)
        else:
            state = vm_util.get_vm_state_from_name(self._session,
                                                   instance['uuid'])
            ignored_states = ['poweredon', 'suspended']
            if state.lower() in ignored_states:
                return
            # Instance is not up and could be in an unknown state.
            # Be as absolute as possible about getting it back into
            # a known and running state.
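            # Note that this driver's reboot() ignores the reboot_type
            # argument, so 'hard' here only satisfies the compute driver
            # signature.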
self.reboot(context, instance, network_info, 'hard', block_device_info) def poll_rebooting_instances(self, timeout, instances): """Poll for rebooting instances.""" self._vmops.poll_rebooting_instances(timeout, instances) def get_info(self, instance): """Return info about the VM instance.""" return self._vmops.get_info(instance) def get_diagnostics(self, instance): """Return data about VM diagnostics.""" return self._vmops.get_diagnostics(instance) def get_vnc_console(self, context, instance): """Return link to instance's VNC console.""" return self._vmops.get_vnc_console(instance) def get_volume_connector(self, instance): """Return volume connector information.""" return self._volumeops.get_volume_connector(instance) def get_host_ip_addr(self): """Retrieves the IP address of the ESX host.""" return self._host_ip def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): """Attach volume storage to VM instance.""" return self._volumeops.attach_volume(connection_info, instance, mountpoint) def detach_volume(self, connection_info, instance, mountpoint, encryption=None): """Detach volume storage to VM instance.""" return self._volumeops.detach_volume(connection_info, instance, mountpoint) def get_console_pool_info(self, console_type): """Get info about the host on which the VM resides.""" return {'address': CONF.vmware.host_ip, 'username': CONF.vmware.host_username, 'password': CONF.vmware.host_password} def _get_available_resources(self, host_stats): return {'vcpus': host_stats['vcpus'], 'memory_mb': host_stats['host_memory_total'], 'local_gb': host_stats['disk_total'], 'vcpus_used': 0, 'memory_mb_used': host_stats['host_memory_total'] - host_stats['host_memory_free'], 'local_gb_used': host_stats['disk_used'], 'hypervisor_type': host_stats['hypervisor_type'], 'hypervisor_version': host_stats['hypervisor_version'], 'hypervisor_hostname': host_stats['hypervisor_hostname'], 'cpu_info': jsonutils.dumps(host_stats['cpu_info']), 'supported_instances': jsonutils.dumps( host_stats['supported_instances']), } def get_available_resource(self, nodename): """Retrieve resource information. This method is called when nova-compute launches, and as part of a periodic task that records the results in the DB. :returns: dictionary describing resources """ host_stats = self.get_host_stats(refresh=True) # Updating host information return self._get_available_resources(host_stats) def update_host_status(self): """Update the status info of the host, and return those values to the calling program. """ return self.host_state.update_status() def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run the update first. """ return self.host_state.get_host_stats(refresh=refresh) def host_power_action(self, host, action): """Reboots, shuts down or powers up the host.""" return self._host.host_power_action(host, action) def host_maintenance_mode(self, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. 
""" return self._host.host_maintenance_mode(host, mode) def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" return self._host.set_host_enabled(host, enabled) def get_host_uptime(self, host): return 'Please refer to %s for the uptime' % CONF.vmware.host_ip def inject_network_info(self, instance, network_info): """inject network info for specified instance.""" self._vmops.inject_network_info(instance, network_info) def list_instance_uuids(self): """List VM instance UUIDs.""" uuids = self._vmops.list_instances() return [uuid for uuid in uuids if uuidutils.is_uuid_like(uuid)] def manage_image_cache(self, context, all_instances): """Manage the local cache of images.""" self._vmops.manage_image_cache(context, all_instances) class VMwareVCDriver(VMwareESXDriver): """The VC host connection object.""" # The vCenter driver includes several additional VMware vSphere # capabilities that include API that act on hosts or groups of # hosts in clusters or non-cluster logical-groupings. # # vCenter is not a hypervisor itself, it works with multiple # hypervisor host machines and their guests. This fact can # subtly alter how vSphere and OpenStack interoperate. def _do_deprecation_warning(self): # Driver validated by VMware's Minesweeper CI pass def __init__(self, virtapi, read_only=False, scheme="https"): super(VMwareVCDriver, self).__init__(virtapi) # Get the list of clusters to be used self._cluster_names = CONF.vmware.cluster_name self.dict_mors = vm_util.get_all_cluster_refs_by_name(self._session, self._cluster_names) if not self.dict_mors: raise exception.NotFound(_("All clusters specified %s were not" " found in the vCenter") % self._cluster_names) # Check if there are any clusters that were specified in the nova.conf # but are not in the vCenter, for missing clusters log a warning. clusters_found = [v.get('name') for k, v in self.dict_mors.iteritems()] missing_clusters = set(self._cluster_names) - set(clusters_found) if missing_clusters: LOG.warn(_("The following clusters could not be found in the" " vCenter %s") % list(missing_clusters)) # The _resources is used to maintain the vmops, volumeops and vcstate # objects per cluster self._resources = {} self._resource_keys = set() self._virtapi = virtapi self._update_resources() # The following initialization is necessary since the base class does # not use VC state. first_cluster = self._resources.keys()[0] self._vmops = self._resources.get(first_cluster).get('vmops') self._volumeops = self._resources.get(first_cluster).get('volumeops') self._vc_state = self._resources.get(first_cluster).get('vcstate') def list_instances(self): """List VM instances from all nodes.""" instances = [] nodes = self.get_available_nodes() for node in nodes: vmops = self._get_vmops_for_compute_node(node) instances.extend(vmops.list_instances()) return instances def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None): """Transfers the disk of a running instance in multiple phases, turning off the instance before the end. 
""" _vmops = self._get_vmops_for_compute_node(instance['node']) return _vmops.migrate_disk_and_power_off(context, instance, dest, flavor) def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.confirm_migration(migration, instance, network_info) def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): """Finish reverting a resize, powering back on the instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.finish_revert_migration(context, instance, network_info, block_device_info, power_on) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, block_device_info=None, power_on=True): """Completes a resize, turning on the migrated instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.finish_migration(context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info, power_on) def live_migration(self, context, instance_ref, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Live migration of an instance to another host.""" self._vmops.live_migration(context, instance_ref, dest, post_method, recover_method, block_migration) def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info): """Clean up destination node after a failed live migration.""" self.destroy(context, instance, network_info, block_device_info) def get_instance_disk_info(self, instance_name, block_device_info=None): pass def get_vnc_console(self, context, instance): """Return link to instance's VNC console using vCenter logic.""" # In this situation, ESXi and vCenter require different # API logic to create a valid VNC console connection object. # In specific, vCenter does not actually run the VNC service # itself. You must talk to the VNC host underneath vCenter. _vmops = self._get_vmops_for_compute_node(instance['node']) return _vmops.get_vnc_console_vcenter(instance) def _update_resources(self): """This method creates a dictionary of VMOps, VolumeOps and VCState. The VMwareVMOps, VMwareVolumeOps and VCState object is for each cluster/rp. 
The dictionary is of the form { domain-1000 : {'vmops': vmops_obj, 'volumeops': volumeops_obj, 'vcstate': vcstate_obj, 'name': MyCluster}, resgroup-1000 : {'vmops': vmops_obj, 'volumeops': volumeops_obj, 'vcstate': vcstate_obj, 'name': MyRP}, } """ added_nodes = set(self.dict_mors.keys()) - set(self._resource_keys) for node in added_nodes: _volumeops = volumeops.VMwareVolumeOps(self._session, self.dict_mors[node]['cluster_mor'], vc_support=True) _vmops = vmops.VMwareVCVMOps(self._session, self._virtapi, _volumeops, self.dict_mors[node]['cluster_mor'], datastore_regex=self._datastore_regex) name = self.dict_mors.get(node)['name'] nodename = self._create_nodename(node, name) _vc_state = host.VCState(self._session, nodename, self.dict_mors.get(node)['cluster_mor']) self._resources[nodename] = {'vmops': _vmops, 'volumeops': _volumeops, 'vcstate': _vc_state, 'name': name, } self._resource_keys.add(node) deleted_nodes = (set(self._resource_keys) - set(self.dict_mors.keys())) for node in deleted_nodes: name = self.dict_mors.get(node)['name'] nodename = self._create_nodename(node, name) del self._resources[nodename] self._resource_keys.discard(node) def _create_nodename(self, mo_id, display_name): """Creates the name that is stored in hypervisor_hostname column. The name will be of the form similar to domain-1000(MyCluster) resgroup-1000(MyResourcePool) """ return mo_id + '(' + display_name + ')' def _get_resource_for_node(self, nodename): """Gets the resource information for the specific node.""" resource = self._resources.get(nodename) if not resource: msg = _("The resource %s does not exist") % nodename raise exception.NotFound(msg) return resource def _get_vmops_for_compute_node(self, nodename): """Retrieve vmops object from mo_id stored in the node name. Node name is of the form domain-1000(MyCluster) """ resource = self._get_resource_for_node(nodename) return resource['vmops'] def _get_volumeops_for_compute_node(self, nodename): """Retrieve vmops object from mo_id stored in the node name. Node name is of the form domain-1000(MyCluster) """ resource = self._get_resource_for_node(nodename) return resource['volumeops'] def _get_vc_state_for_compute_node(self, nodename): """Retrieve VCState object from mo_id stored in the node name. Node name is of the form domain-1000(MyCluster) """ resource = self._get_resource_for_node(nodename) return resource['vcstate'] def get_available_resource(self, nodename): """Retrieve resource info. This method is called when nova-compute launches, and as part of a periodic task. :returns: dictionary describing resources """ stats_dict = {} vc_state = self._get_vc_state_for_compute_node(nodename) if vc_state: host_stats = vc_state.get_host_stats(refresh=True) # Updating host information stats_dict = self._get_available_resources(host_stats) else: LOG.info(_("Invalid cluster or resource pool" " name : %s") % nodename) return stats_dict def get_available_nodes(self, refresh=False): """Returns nodenames of all nodes managed by the compute service. This method is for multi compute-nodes support. If a driver supports multi compute-nodes, this method returns a list of nodenames managed by the service. Otherwise, this method should return [hypervisor_hostname]. 
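
        For example, with two clusters configured the result is a list of
        nodenames built by _create_nodename(), e.g.
        ['domain-1000(MyCluster)', 'domain-1001(MyCluster2)'].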
""" self.dict_mors = vm_util.get_all_cluster_refs_by_name( self._session, CONF.vmware.cluster_name) node_list = [] self._update_resources() for node in self.dict_mors.keys(): nodename = self._create_nodename(node, self.dict_mors.get(node)['name']) node_list.append(nodename) LOG.debug(_("The available nodes are: %s") % node_list) return node_list def get_host_stats(self, refresh=True): """Return currently known host stats.""" stats_list = [] nodes = self.get_available_nodes() for node in nodes: stats_list.append(self.get_available_resource(node)) return stats_list def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): """Create VM instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info) def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): """Attach volume storage to VM instance.""" _volumeops = self._get_volumeops_for_compute_node(instance['node']) return _volumeops.attach_volume(connection_info, instance, mountpoint) def detach_volume(self, connection_info, instance, mountpoint, encryption=None): """Detach volume storage to VM instance.""" _volumeops = self._get_volumeops_for_compute_node(instance['node']) return _volumeops.detach_volume(connection_info, instance, mountpoint) def get_volume_connector(self, instance): """Return volume connector information.""" _volumeops = self._get_volumeops_for_compute_node(instance['node']) return _volumeops.get_volume_connector(instance) def snapshot(self, context, instance, name, update_task_state): """Create snapshot from a running VM instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.snapshot(context, instance, name, update_task_state) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): """Reboot VM instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.reboot(instance, network_info) def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): """Destroy VM instance.""" # Destroy gets triggered when Resource Claim in resource_tracker # is not successful. When resource claim is not successful, # node is not set in instance. 
Perform destroy only if node is set if not instance['node']: return _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.destroy(instance, network_info, destroy_disks) def pause(self, instance): """Pause VM instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.pause(instance) def unpause(self, instance): """Unpause paused VM instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.unpause(instance) def suspend(self, instance): """Suspend the specified instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.suspend(instance) def resume(self, context, instance, network_info, block_device_info=None): """Resume the suspended VM instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.resume(instance) def rescue(self, context, instance, network_info, image_meta, rescue_password): """Rescue the specified instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.rescue(context, instance, network_info, image_meta) def unrescue(self, instance, network_info): """Unrescue the specified instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.unrescue(instance) def power_off(self, instance): """Power off the specified instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.power_off(instance) def power_on(self, context, instance, network_info, block_device_info=None): """Power on the specified instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops._power_on(instance) def poll_rebooting_instances(self, timeout, instances): """Poll for rebooting instances.""" for instance in instances: _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.poll_rebooting_instances(timeout, [instance]) def get_info(self, instance): """Return info about the VM instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) return _vmops.get_info(instance) def get_diagnostics(self, instance): """Return data about VM diagnostics.""" _vmops = self._get_vmops_for_compute_node(instance['node']) return _vmops.get_diagnostics(instance) def host_power_action(self, host, action): """Host operations not supported by VC driver. This needs to override the ESX driver implementation. """ raise NotImplementedError() def host_maintenance_mode(self, host, mode): """Host operations not supported by VC driver. This needs to override the ESX driver implementation. """ raise NotImplementedError() def set_host_enabled(self, host, enabled): """Host operations not supported by VC driver. This needs to override the ESX driver implementation. 
""" raise NotImplementedError() def inject_network_info(self, instance, network_info): """inject network info for specified instance.""" _vmops = self._get_vmops_for_compute_node(instance['node']) _vmops.inject_network_info(instance, network_info) def manage_image_cache(self, context, all_instances): """Manage the local cache of images.""" # Running instances per cluster cluster_instances = {} for instance in all_instances: instances = cluster_instances.get(instance['node']) if instances: instances.append(instance) else: instances = [instance] cluster_instances[instance['node']] = instances # Invoke the image aging per cluster for resource in self._resources.keys(): instances = cluster_instances.get(resource, []) _vmops = self._get_vmops_for_compute_node(resource) _vmops.manage_image_cache(context, instances) class VMwareAPISession(object): """Sets up a session with the VC/ESX host and handles all the calls made to the host. """ def __init__(self, host_ip=CONF.vmware.host_ip, username=CONF.vmware.host_username, password=CONF.vmware.host_password, retry_count=CONF.vmware.api_retry_count, scheme="https"): self._host_ip = host_ip self._host_username = username self._host_password = password self._api_retry_count = retry_count self._scheme = scheme self._session = None self.vim = None self._create_session() def _get_vim_object(self): """Create the VIM Object instance.""" return vim.Vim(protocol=self._scheme, host=self._host_ip) def _create_session(self): """Creates a session with the VC/ESX host.""" delay = 1 while True: try: # Login and setup the session with the host for making # API calls self.vim = self._get_vim_object() session = self.vim.Login( self.vim.get_service_content().sessionManager, userName=self._host_username, password=self._host_password) # Terminate the earlier session, if possible ( For the sake of # preserving sessions as there is a limit to the number of # sessions we can have ) if self._session: try: self.vim.TerminateSession( self.vim.get_service_content().sessionManager, sessionId=[self._session.key]) except Exception as excep: # This exception is something we can live with. It is # just an extra caution on our side. The session may # have been cleared. We could have made a call to # SessionIsActive, but that is an overhead because we # anyway would have to call TerminateSession. LOG.debug(excep) self._session = session return except Exception as excep: LOG.critical(_("Unable to connect to server at %(server)s, " "sleeping for %(seconds)s seconds"), {'server': self._host_ip, 'seconds': delay}, exc_info=True) # exc_info logs the exception with the message time.sleep(delay) delay = min(2 * delay, 60) def _is_vim_object(self, module): """Check if the module is a VIM Object instance.""" return isinstance(module, vim.Vim) def _session_is_active(self): active = False try: active = self.vim.SessionIsActive( self.vim.get_service_content().sessionManager, sessionID=self._session.key, userName=self._session.userName) except Exception as e: LOG.warning(_("Unable to validate session %s!"), self._session.key) LOG.debug(_("Exception: %(ex)s"), {'ex': e}) return active def _call_method(self, module, method, *args, **kwargs): """Calls a method within the module specified with args provided. 
""" args = list(args) retry_count = 0 while True: exc = None try: if not self._is_vim_object(module): # If it is not the first try, then get the latest # vim object if retry_count > 0: args = args[1:] args = [self.vim] + args retry_count += 1 temp_module = module for method_elem in method.split("."): temp_module = getattr(temp_module, method_elem) return temp_module(*args, **kwargs) except error_util.VimFaultException as excep: # If it is a Session Fault Exception, it may point # to a session gone bad. So we try re-creating a session # and then proceeding ahead with the call. exc = excep if error_util.NOT_AUTHENTICATED in excep.fault_list: # Because of the idle session returning an empty # RetrievePropertiesResponse and also the same is returned # when there is say empty answer to the query for # VMs on the host ( as in no VMs on the host), we have no # way to differentiate. We thus check if the session is # active if self._session_is_active(): return [] LOG.warning(_("Session %s is inactive!"), self._session.key) self._create_session() else: # No re-trying for errors for API call has gone through # and is the caller's fault. Caller should handle these # errors. e.g, InvalidArgument fault. # Raise specific exceptions here if possible if excep.fault_list: fault = excep.fault_list[0] raise error_util.get_fault_class(fault)(str(excep)) break except error_util.SessionOverLoadException as excep: # For exceptions which may come because of session overload, # we retry exc = excep except error_util.SessionConnectionException as excep: # For exceptions with connections we create the session exc = excep self._create_session() except Exception as excep: # If it is a proper exception, say not having furnished # proper data in the SOAP call or the retry limit having # exceeded, we raise the exception exc = excep break LOG.debug(_("_call_method(session=%(key)s) failed. " "Module: %(module)s. " "Method: %(method)s. " "args: %(args)s. " "kwargs: %(kwargs)s. " "Iteration: %(n)s. " "Exception: %(ex)s. "), {'key': self._session.key, 'module': module, 'method': method, 'args': args, 'kwargs': kwargs, 'n': retry_count, 'ex': exc}) # If retry count has been reached then break and # raise the exception if retry_count > self._api_retry_count: break time.sleep(TIME_BETWEEN_API_CALL_RETRIES) LOG.critical(_("In vmwareapi: _call_method (session=%s)"), self._session.key, exc_info=True) raise def _get_vim(self): """Gets the VIM object reference.""" if self.vim is None: self._create_session() return self.vim def _stop_loop(self, loop): loop.stop() def _wait_for_task(self, task_ref): """Return a Deferred that will give the result of the given task. The task is polled until it completes. """ done = event.Event() loop = loopingcall.FixedIntervalLoopingCall(self._poll_task, task_ref, done) loop.start(CONF.vmware.task_poll_interval) try: ret_val = done.wait() except Exception: raise finally: self._stop_loop(loop) return ret_val def _poll_task(self, task_ref, done): """Poll the given task, and fires the given Deferred if we get a result. 
""" try: task_info = self._call_method(vim_util, "get_dynamic_property", task_ref, "Task", "info") task_name = getattr(task_info, 'name', '') if task_info.state in ['queued', 'running']: return elif task_info.state == 'success': LOG.debug(_("Task [%(task_name)s] %(task_ref)s " "status: success"), {'task_name': task_name, 'task_ref': task_ref}) done.send(task_info) else: error_info = str(task_info.error.localizedMessage) LOG.warn(_("Task [%(task_name)s] %(task_ref)s " "status: error %(error_info)s"), {'task_name': task_name, 'task_ref': task_ref, 'error_info': error_info}) # Check if we can raise a specific exception error = task_info.error name = error.fault.__class__.__name__ task_ex = error_util.get_fault_class(name)(error_info) done.send_exception(task_ex) except Exception as excep: LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep) done.send_exception(excep) nova-2014.1.5/nova/virt/vmwareapi/fake.py0000664000567000056700000015220712540642544021266 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A fake VMware VI API implementation. """ import collections import pprint from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import units from nova.openstack.common import uuidutils from nova.virt.vmwareapi import error_util _CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine', 'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session', 'files', 'ClusterComputeResource', 'HostStorageSystem'] _FAKE_FILE_SIZE = 1024 _db_content = {} LOG = logging.getLogger(__name__) def log_db_contents(msg=None): """Log DB Contents.""" LOG.debug(_("%(text)s: _db_content => %(content)s"), {'text': msg or "", 'content': pprint.pformat(_db_content)}) def reset(vc=False): """Resets the db contents.""" cleanup() create_network() create_host_network_system() create_host_storage_system() ds_ref1 = create_datastore('ds1', 1024, 500) create_host(ds_ref=ds_ref1) if vc: ds_ref2 = create_datastore('ds2', 1024, 500) create_host(ds_ref=ds_ref2) create_datacenter('dc1', ds_ref1) if vc: create_datacenter('dc2', ds_ref2) create_res_pool() if vc: create_cluster('test_cluster', ds_ref1) create_cluster('test_cluster2', ds_ref2) def cleanup(): """Clear the db contents.""" for c in _CLASSES: # We fake the datastore by keeping the file references as a list of # names in the db if c == 'files': _db_content[c] = [] else: _db_content[c] = {} def _create_object(table, table_obj): """Create an object in the db.""" _db_content[table][table_obj.obj] = table_obj def _get_object(obj_ref): """Get object for the give reference.""" return _db_content[obj_ref.type][obj_ref] def _get_objects(obj_type): """Get objects of the type.""" lst_objs = FakeRetrieveResult() for key 
in _db_content[obj_type]: lst_objs.add_object(_db_content[obj_type][key]) return lst_objs def _convert_to_array_of_mor(mors): """Wraps the given array into a DataObject.""" array_of_mors = DataObject() array_of_mors.ManagedObjectReference = mors return array_of_mors def _convert_to_array_of_opt_val(optvals): """Wraps the given array into a DataObject.""" array_of_optv = DataObject() array_of_optv.OptionValue = optvals return array_of_optv class FakeRetrieveResult(object): """Object to retrieve a ObjectContent list.""" def __init__(self, token=None): self.objects = [] if token is not None: self.token = token def add_object(self, object): self.objects.append(object) class MissingProperty(object): """Missing object in ObjectContent's missing set.""" def __init__(self, path='fake-path', message='fake_message', method_fault=None): self.path = path self.fault = DataObject() self.fault.localizedMessage = message self.fault.fault = method_fault def _get_object_refs(obj_type): """Get object References of the type.""" lst_objs = [] for key in _db_content[obj_type]: lst_objs.append(key) return lst_objs def _update_object(table, table_obj): """Update objects of the type.""" _db_content[table][table_obj.obj] = table_obj class Prop(object): """Property Object base class.""" def __init__(self, name=None, val=None): self.name = name self.val = val class ManagedObjectReference(object): """A managed object reference is a remote identifier.""" def __init__(self, name="ManagedObject", value=None): super(ManagedObjectReference, self) # Managed Object Reference value attributes # typically have values like vm-123 or # host-232 and not UUID. self.value = value # Managed Object Reference type # attributes hold the name of the type # of the vCenter object the value # attribute is the identifier for self.type = name self._type = name class ObjectContent(object): """ObjectContent array holds dynamic properties.""" # This class is a *fake* of a class sent back to us by # SOAP. It has its own names. These names are decided # for us by the API we are *faking* here. def __init__(self, obj_ref, prop_list=None, missing_list=None): self.obj = obj_ref if not isinstance(prop_list, collections.Iterable): prop_list = [] if not isinstance(missing_list, collections.Iterable): missing_list = [] # propSet is the name your Python code will need to # use since this is the name that the API will use if prop_list: self.propSet = prop_list # missingSet is the name your python code will # need to use since this is the name that the # API we are talking to will use. if missing_list: self.missingSet = missing_list class ManagedObject(object): """Managed Object base class.""" _counter = 0 def __init__(self, mo_id_prefix="obj"): """Sets the obj property which acts as a reference to the object.""" object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix)) object.__setattr__(self, 'propSet', []) object.__setattr__(self, 'obj', ManagedObjectReference(self.__class__.__name__, self.mo_id)) def set(self, attr, val): """Sets an attribute value. Not using the __setattr__ directly for we want to set attributes of the type 'a.b.c' and using this function class we set the same. """ self.__setattr__(attr, val) def get(self, attr): """Gets an attribute. Used as an intermediary to get nested property like 'a.b.c' value. 
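
        For example, vm.get('summary.config.numCpu') returns the value
        previously stored with set('summary.config.numCpu', ...).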
""" return self.__getattr__(attr) def __setattr__(self, attr, val): # TODO(hartsocks): this is adds unnecessary complexity to the class for prop in self.propSet: if prop.name == attr: prop.val = val return elem = Prop() elem.name = attr elem.val = val self.propSet.append(elem) def __getattr__(self, attr): # TODO(hartsocks): remove this # in a real ManagedObject you have to iterate the propSet # in a real ManagedObject, the propSet is a *set* not a list for elem in self.propSet: if elem.name == attr: return elem.val msg = _("Property %(attr)s not set for the managed object %(name)s") raise exception.NovaException(msg % {'attr': attr, 'name': self.__class__.__name__}) def _generate_moid(self, prefix): """Generates a new Managed Object ID.""" self.__class__._counter += 1 return prefix + "-" + str(self.__class__._counter) def __repr__(self): return jsonutils.dumps(dict([(elem.name, elem.val) for elem in self.propSet])) class DataObject(object): """Data object base class.""" def __init__(self, obj_name=None): self.obj_name = obj_name def __repr__(self): return str(self.__dict__) class HostInternetScsiHba(DataObject): """iSCSI Host Bus Adapter""" def __init__(self): super(HostInternetScsiHba, self).__init__() self.device = 'vmhba33' self.key = 'key-vmhba33' class FileAlreadyExists(DataObject): """File already exists class.""" def __init__(self): super(FileAlreadyExists, self).__init__() self.__name__ = error_util.FILE_ALREADY_EXISTS class FileNotFound(DataObject): """File not found class.""" def __init__(self): super(FileNotFound, self).__init__() self.__name__ = error_util.FILE_NOT_FOUND class FileFault(DataObject): """File fault.""" def __init__(self): super(FileFault, self).__init__() self.__name__ = error_util.FILE_FAULT class CannotDeleteFile(DataObject): """Cannot delete file.""" def __init__(self): super(CannotDeleteFile, self).__init__() self.__name__ = error_util.CANNOT_DELETE_FILE class FileLocked(DataObject): """File locked.""" def __init__(self): super(FileLocked, self).__init__() self.__name__ = error_util.FILE_LOCKED class VirtualDisk(DataObject): """Virtual Disk class.""" def __init__(self, controllerKey=0, unitNumber=0): super(VirtualDisk, self).__init__() self.key = 0 self.controllerKey = controllerKey self.unitNumber = unitNumber class VirtualDiskFlatVer2BackingInfo(DataObject): """VirtualDiskFlatVer2BackingInfo class.""" def __init__(self): super(VirtualDiskFlatVer2BackingInfo, self).__init__() self.thinProvisioned = False self.eagerlyScrub = False class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject): """VirtualDiskRawDiskMappingVer1BackingInfo class.""" def __init__(self): super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__() self.lunUuid = "" class VirtualIDEController(DataObject): def __init__(self, key=0): self.key = key class VirtualLsiLogicController(DataObject): """VirtualLsiLogicController class.""" def __init__(self, key=0, scsiCtlrUnitNumber=0): self.key = key self.scsiCtlrUnitNumber = scsiCtlrUnitNumber class VirtualLsiLogicSASController(DataObject): """VirtualLsiLogicSASController class.""" pass class VirtualPCNet32(DataObject): """VirtualPCNet32 class.""" def __init__(self): super(VirtualPCNet32, self).__init__() self.key = 4000 class OptionValue(DataObject): """OptionValue class.""" def __init__(self, key=None, value=None): super(OptionValue, self).__init__() self.key = key self.value = value class VirtualMachine(ManagedObject): """Virtual Machine class.""" def __init__(self, **kwargs): super(VirtualMachine, self).__init__("vm") self.set("name", 
kwargs.get("name", 'test-vm')) self.set("runtime.connectionState", kwargs.get("conn_state", "connected")) self.set("summary.config.guestId", kwargs.get("guest", "otherGuest")) ds_do = kwargs.get("ds", None) self.set("datastore", _convert_to_array_of_mor(ds_do)) self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus", "toolsOk")) self.set("summary.guest.toolsRunningStatus", kwargs.get( "toolsrunningstate", "guestToolsRunning")) self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn")) self.set("config.files.vmPathName", kwargs.get("vmPathName")) self.set("summary.config.numCpu", kwargs.get("numCpu", 1)) self.set("summary.config.memorySizeMB", kwargs.get("mem", 1)) self.set("summary.config.instanceUuid", kwargs.get("instanceUuid")) self.set("config.hardware.device", kwargs.get("virtual_device", None)) exconfig_do = kwargs.get("extra_config", None) self.set("config.extraConfig", _convert_to_array_of_opt_val(exconfig_do)) if exconfig_do: for optval in exconfig_do: self.set('config.extraConfig["%s"]' % optval.key, optval) self.set('runtime.host', kwargs.get("runtime_host", None)) self.device = kwargs.get("virtual_device") # Sample of diagnostics data is below. config = [ ('template', False), ('vmPathName', 'fake_path'), ('memorySizeMB', 512), ('cpuReservation', 0), ('memoryReservation', 0), ('numCpu', 1), ('numEthernetCards', 1), ('numVirtualDisks', 1)] self.set("summary.config", config) quickStats = [ ('overallCpuUsage', 0), ('overallCpuDemand', 0), ('guestMemoryUsage', 0), ('hostMemoryUsage', 141), ('balloonedMemory', 0), ('consumedOverheadMemory', 20)] self.set("summary.quickStats", quickStats) key1 = {'key': 'cpuid.AES'} key2 = {'key': 'cpuid.AVX'} runtime = [ ('connectionState', 'connected'), ('powerState', 'poweredOn'), ('toolsInstallerMounted', False), ('suspendInterval', 0), ('memoryOverhead', 21417984), ('maxCpuUsage', 2000), ('featureRequirement', [key1, key2])] self.set("summary.runtime", runtime) def reconfig(self, factory, val): """Called to reconfigure the VM. Actually customizes the property setting of the Virtual Machine object. 
""" if hasattr(val, 'name') and val.name: self.set("name", val.name) if hasattr(val, 'extraConfig'): extraConfigs = _merge_extraconfig( self.get("config.extraConfig").OptionValue, val.extraConfig) self.get("config.extraConfig").OptionValue = extraConfigs if hasattr(val, 'instanceUuid') and val.instanceUuid is not None: if val.instanceUuid == "": val.instanceUuid = uuidutils.generate_uuid() self.set("summary.config.instanceUuid", val.instanceUuid) try: if not hasattr(val, 'deviceChange'): return if len(val.deviceChange) < 2: return # Case of Reconfig of VM to attach disk controller_key = val.deviceChange[0].device.controllerKey filename = val.deviceChange[0].device.backing.fileName disk = VirtualDisk() disk.controllerKey = controller_key disk_backing = VirtualDiskFlatVer2BackingInfo() disk_backing.fileName = filename disk_backing.key = -101 disk.backing = disk_backing controller = VirtualLsiLogicController() controller.key = controller_key self.set("config.hardware.device", [disk, controller, self.device[0]]) except AttributeError: pass class Network(ManagedObject): """Network class.""" def __init__(self): super(Network, self).__init__("network") self.set("summary.name", "vmnet0") class ResourcePool(ManagedObject): """Resource Pool class.""" def __init__(self, name="test_ResPool", value="resgroup-test"): super(ResourcePool, self).__init__("rp") self.set("name", name) summary = DataObject() runtime = DataObject() config = DataObject() memory = DataObject() cpu = DataObject() memoryAllocation = DataObject() cpuAllocation = DataObject() vm_list = DataObject() memory.maxUsage = 1000 * units.Mi memory.overallUsage = 500 * units.Mi cpu.maxUsage = 10000 cpu.overallUsage = 1000 runtime.cpu = cpu runtime.memory = memory summary.runtime = runtime cpuAllocation.limit = 10000 memoryAllocation.limit = 1024 memoryAllocation.reservation = 1024 config.memoryAllocation = memoryAllocation config.cpuAllocation = cpuAllocation vm_list.ManagedObjectReference = [] self.set("summary", summary) self.set("summary.runtime.memory", memory) self.set("config", config) self.set("vm", vm_list) parent = ManagedObjectReference(value=value, name=name) owner = ManagedObjectReference(value=value, name=name) self.set("parent", parent) self.set("owner", owner) class DatastoreHostMount(DataObject): def __init__(self, value='host-100'): super(DatastoreHostMount, self).__init__() host_ref = (_db_content["HostSystem"] [_db_content["HostSystem"].keys()[0]].obj) host_system = DataObject() host_system.ManagedObjectReference = [host_ref] host_system.value = value self.key = host_system class ClusterComputeResource(ManagedObject): """Cluster class.""" def __init__(self, name="test_cluster"): super(ClusterComputeResource, self).__init__("domain") self.set("name", name) self.set("host", None) self.set("datastore", None) self.set("resourcePool", None) summary = DataObject() summary.numHosts = 0 summary.numCpuCores = 0 summary.numCpuThreads = 0 summary.numEffectiveHosts = 0 summary.totalMemory = 0 summary.effectiveMemory = 0 summary.effectiveCpu = 10000 self.set("summary", summary) def _add_root_resource_pool(self, r_pool): if r_pool: self.set("resourcePool", r_pool) def _add_host(self, host_sys): if host_sys: hosts = self.get("host") if hosts is None: hosts = DataObject() hosts.ManagedObjectReference = [] self.set("host", hosts) hosts.ManagedObjectReference.append(host_sys) # Update summary every time a new host is added self._update_summary() def _add_datastore(self, datastore): if datastore: datastores = self.get("datastore") if 
datastores is None: datastores = DataObject() datastores.ManagedObjectReference = [] self.set("datastore", datastores) datastores.ManagedObjectReference.append(datastore) # Method to update summary of a cluster upon host addition def _update_summary(self): summary = self.get("summary") summary.numHosts = 0 summary.numCpuCores = 0 summary.numCpuThreads = 0 summary.numEffectiveHosts = 0 summary.totalMemory = 0 summary.effectiveMemory = 0 hosts = self.get("host") # Compute the aggregate stats summary.numHosts = len(hosts.ManagedObjectReference) for host_ref in hosts.ManagedObjectReference: host_sys = _get_object(host_ref) connected = host_sys.get("connected") host_summary = host_sys.get("summary") summary.numCpuCores += host_summary.hardware.numCpuCores summary.numCpuThreads += host_summary.hardware.numCpuThreads summary.totalMemory += host_summary.hardware.memorySize free_memory = (host_summary.hardware.memorySize / units.Mi - host_summary.quickStats.overallMemoryUsage) summary.effectiveMemory += free_memory if connected else 0 summary.numEffectiveHosts += 1 if connected else 0 self.set("summary", summary) class Datastore(ManagedObject): """Datastore class.""" def __init__(self, name="fake-ds", capacity=1024, free=500): super(Datastore, self).__init__("ds") self.set("summary.type", "VMFS") self.set("summary.name", name) self.set("summary.capacity", capacity * units.Gi) self.set("summary.freeSpace", free * units.Gi) self.set("summary.accessible", True) self.set("browser", "") class HostNetworkSystem(ManagedObject): """HostNetworkSystem class.""" def __init__(self, name="networkSystem"): super(HostNetworkSystem, self).__init__("ns") self.set("name", name) pnic_do = DataObject() pnic_do.device = "vmnic0" net_info_pnic = DataObject() net_info_pnic.PhysicalNic = [pnic_do] self.set("networkInfo.pnic", net_info_pnic) class HostStorageSystem(ManagedObject): """HostStorageSystem class.""" def __init__(self): super(HostStorageSystem, self).__init__("storageSystem") class HostSystem(ManagedObject): """Host System class.""" def __init__(self, name="ha-host", connected=True, ds_ref=None, maintenance_mode=False): super(HostSystem, self).__init__("host") self.set("name", name) if _db_content.get("HostNetworkSystem", None) is None: create_host_network_system() if not _get_object_refs('HostStorageSystem'): create_host_storage_system() host_net_key = _db_content["HostNetworkSystem"].keys()[0] host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj self.set("configManager.networkSystem", host_net_sys) host_storage_sys_key = _get_object_refs('HostStorageSystem')[0] self.set("configManager.storageSystem", host_storage_sys_key) if not ds_ref: ds_ref = create_datastore('local-host-%s' % name, 500, 500) datastores = DataObject() datastores.ManagedObjectReference = [ds_ref] self.set("datastore", datastores) summary = DataObject() hardware = DataObject() hardware.numCpuCores = 8 hardware.numCpuPkgs = 2 hardware.numCpuThreads = 16 hardware.vendor = "Intel" hardware.cpuModel = "Intel(R) Xeon(R)" hardware.uuid = "host-uuid" hardware.memorySize = units.Gi summary.hardware = hardware runtime = DataObject() if connected: runtime.connectionState = "connected" else: runtime.connectionState = "disconnected" runtime.inMaintenanceMode = maintenance_mode summary.runtime = runtime quickstats = DataObject() quickstats.overallMemoryUsage = 500 summary.quickStats = quickstats product = DataObject() product.name = "VMware ESXi" product.version = "5.0.0" config = DataObject() config.product = product summary.config = config 
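        # The fake host exposes a single physical NIC, vmnic0; the vSwitch
        # and port group built below hang off this device.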
pnic_do = DataObject() pnic_do.device = "vmnic0" net_info_pnic = DataObject() net_info_pnic.PhysicalNic = [pnic_do] self.set("summary", summary) self.set("capability.maxHostSupportedVcpus", 600) self.set("summary.hardware", hardware) self.set("summary.runtime", runtime) self.set("config.network.pnic", net_info_pnic) self.set("connected", connected) if _db_content.get("Network", None) is None: create_network() net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj network_do = DataObject() network_do.ManagedObjectReference = [net_ref] self.set("network", network_do) vswitch_do = DataObject() vswitch_do.pnic = ["vmnic0"] vswitch_do.name = "vSwitch0" vswitch_do.portgroup = ["PortGroup-vmnet0"] net_swicth = DataObject() net_swicth.HostVirtualSwitch = [vswitch_do] self.set("config.network.vswitch", net_swicth) host_pg_do = DataObject() host_pg_do.key = "PortGroup-vmnet0" pg_spec = DataObject() pg_spec.vlanId = 0 pg_spec.name = "vmnet0" host_pg_do.spec = pg_spec host_pg = DataObject() host_pg.HostPortGroup = [host_pg_do] self.set("config.network.portgroup", host_pg) config = DataObject() storageDevice = DataObject() iscsi_hba = HostInternetScsiHba() iscsi_hba.iScsiName = "iscsi-name" host_bus_adapter_array = DataObject() host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba] storageDevice.hostBusAdapter = host_bus_adapter_array config.storageDevice = storageDevice self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array) # Set the same on the storage system managed object host_storage_sys = _get_object(host_storage_sys_key) host_storage_sys.set('storageDeviceInfo.hostBusAdapter', host_bus_adapter_array) def _add_iscsi_target(self, data): default_lun = DataObject() default_lun.scsiLun = 'key-vim.host.ScsiDisk-010' default_lun.key = 'key-vim.host.ScsiDisk-010' default_lun.deviceName = 'fake-device' default_lun.uuid = 'fake-uuid' scsi_lun_array = DataObject() scsi_lun_array.ScsiLun = [default_lun] self.set("config.storageDevice.scsiLun", scsi_lun_array) transport = DataObject() transport.address = [data['target_portal']] transport.iScsiName = data['target_iqn'] default_target = DataObject() default_target.lun = [default_lun] default_target.transport = transport iscsi_adapter = DataObject() iscsi_adapter.adapter = 'key-vmhba33' iscsi_adapter.transport = transport iscsi_adapter.target = [default_target] iscsi_topology = DataObject() iscsi_topology.adapter = [iscsi_adapter] self.set("config.storageDevice.scsiTopology", iscsi_topology) def _add_port_group(self, spec): """Adds a port group to the host system object in the db.""" pg_name = spec.name vswitch_name = spec.vswitchName vlanid = spec.vlanId vswitch_do = DataObject() vswitch_do.pnic = ["vmnic0"] vswitch_do.name = vswitch_name vswitch_do.portgroup = ["PortGroup-%s" % pg_name] vswitches = self.get("config.network.vswitch").HostVirtualSwitch vswitches.append(vswitch_do) host_pg_do = DataObject() host_pg_do.key = "PortGroup-%s" % pg_name pg_spec = DataObject() pg_spec.vlanId = vlanid pg_spec.name = pg_name host_pg_do.spec = pg_spec host_pgrps = self.get("config.network.portgroup").HostPortGroup host_pgrps.append(host_pg_do) class Datacenter(ManagedObject): """Datacenter class.""" def __init__(self, name="ha-datacenter", ds_ref=None): super(Datacenter, self).__init__("dc") self.set("name", name) self.set("vmFolder", "vm_folder_ref") if _db_content.get("Network", None) is None: create_network() net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj network_do = DataObject() 
network_do.ManagedObjectReference = [net_ref] self.set("network", network_do) if ds_ref: datastore = DataObject() datastore.ManagedObjectReference = [ds_ref] else: datastore = None self.set("datastore", datastore) class Task(ManagedObject): """Task class.""" def __init__(self, task_name, state="running", result=None, error_fault=None): super(Task, self).__init__("Task") info = DataObject() info.name = task_name info.state = state if state == 'error': error = DataObject() error.localizedMessage = "Error message" if not error_fault: error.fault = DataObject() else: error.fault = error_fault info.error = error info.result = result self.set("info", info) def create_host_network_system(): host_net_system = HostNetworkSystem() _create_object("HostNetworkSystem", host_net_system) def create_host_storage_system(): host_storage_system = HostStorageSystem() _create_object("HostStorageSystem", host_storage_system) def create_host(ds_ref=None): host_system = HostSystem(ds_ref=ds_ref) _create_object('HostSystem', host_system) def create_datacenter(name, ds_ref=None): data_center = Datacenter(name, ds_ref) _create_object('Datacenter', data_center) def create_datastore(name, capacity, free): data_store = Datastore(name, capacity, free) _create_object('Datastore', data_store) return data_store.obj def create_res_pool(): res_pool = ResourcePool() _create_object('ResourcePool', res_pool) return res_pool.obj def create_network(): network = Network() _create_object('Network', network) def create_cluster(name, ds_ref): cluster = ClusterComputeResource(name=name) cluster._add_host(_get_object_refs("HostSystem")[0]) cluster._add_host(_get_object_refs("HostSystem")[1]) cluster._add_datastore(ds_ref) cluster._add_root_resource_pool(create_res_pool()) _create_object('ClusterComputeResource', cluster) def create_task(task_name, state="running", result=None, error_fault=None): task = Task(task_name, state, result, error_fault) _create_object("Task", task) return task def _add_file(file_path): """Adds a file reference to the db.""" _db_content["files"].append(file_path) def _remove_file(file_path): """Removes a file reference from the db.""" if _db_content.get("files") is None: raise exception.NoFilesFound() # Check if the remove is for a single file object or for a folder if file_path.find(".vmdk") != -1: if file_path not in _db_content.get("files"): raise error_util.FileNotFoundException(file_path) _db_content.get("files").remove(file_path) else: # Removes the files in the folder and the folder too from the db to_delete = set() for file in _db_content.get("files"): if file.find(file_path) != -1: to_delete.add(file) for file in to_delete: _db_content.get("files").remove(file) def fake_plug_vifs(*args, **kwargs): """Fakes plugging vifs.""" pass def fake_get_network(*args, **kwargs): """Fake get network.""" return {'type': 'fake'} def get_file(file_path): """Check if file exists in the db.""" if _db_content.get("files") is None: raise exception.NoFilesFound() return file_path in _db_content.get("files") def fake_fetch_image(context, image, instance, **kwargs): """Fakes fetch image call. 
Just adds a reference to the db for the file.""" ds_name = kwargs.get("datastore_name") file_path = kwargs.get("file_path") ds_file_path = "[" + ds_name + "] " + file_path _add_file(ds_file_path) def fake_upload_image(context, image, instance, **kwargs): """Fakes the upload of an image.""" pass def fake_get_vmdk_size_and_properties(context, image_id, instance): """Fakes the file size and properties fetch for the image file.""" props = {"vmware_ostype": "otherGuest", "vmware_adaptertype": "lsiLogic"} return _FAKE_FILE_SIZE, props def _get_vm_mdo(vm_ref): """Gets the Virtual Machine with the ref from the db.""" if _db_content.get("VirtualMachine", None) is None: raise exception.NotFound(_("There is no VM registered")) if vm_ref not in _db_content.get("VirtualMachine"): raise exception.NotFound(_("Virtual Machine with ref %s is not " "there") % vm_ref) return _db_content.get("VirtualMachine")[vm_ref] def _merge_extraconfig(existing, changes): """Imposes the changes in extraConfig over the existing extraConfig.""" existing = existing or [] if (changes): for c in changes: if len([x for x in existing if x.key == c.key]) > 0: extraConf = [x for x in existing if x.key == c.key][0] extraConf.value = c.value else: existing.append(c) return existing class FakeFactory(object): """Fake factory class for the suds client.""" def create(self, obj_name): """Creates a namespace object.""" return DataObject(obj_name) class FakeVim(object): """Fake VIM Class.""" def __init__(self, protocol="https", host="localhost", trace=None): """Initializes the suds client object, sets the service content contents and the cookies for the session. """ self._session = None self.client = DataObject() self.client.factory = FakeFactory() transport = DataObject() transport.cookiejar = "Fake-CookieJar" options = DataObject() options.transport = transport self.client.options = options service_content = self.client.factory.create('ns0:ServiceContent') service_content.propertyCollector = "PropCollector" service_content.virtualDiskManager = "VirtualDiskManager" service_content.fileManager = "FileManager" service_content.rootFolder = "RootFolder" service_content.sessionManager = "SessionManager" service_content.searchIndex = "SearchIndex" about_info = DataObject() about_info.name = "VMware vCenter Server" about_info.version = "5.1.0" service_content.about = about_info self._service_content = service_content def get_service_content(self): return self._service_content def __repr__(self): return "Fake VIM Object" def __str__(self): return "Fake VIM Object" def _login(self): """Logs in and sets the session object in the db.""" self._session = uuidutils.generate_uuid() session = DataObject() session.key = self._session session.userName = 'sessionUserName' _db_content['session'][self._session] = session return session def _logout(self): """Logs out and remove the session object ref from the db.""" s = self._session self._session = None if s not in _db_content['session']: raise exception.NovaException( _("Logging out a session that is invalid or already logged " "out: %s") % s) del _db_content['session'][s] def _terminate_session(self, *args, **kwargs): """Terminates a session.""" s = kwargs.get("sessionId")[0] if s not in _db_content['session']: return del _db_content['session'][s] def _check_session(self): """Checks if the session is active.""" if (self._session is None or self._session not in _db_content['session']): LOG.debug(_("Session is faulty")) raise error_util.VimFaultException( [error_util.NOT_AUTHENTICATED], _("Session Invalid")) 
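    # NOTE(editor): a minimal sketch of driving this fake directly,
    # assuming only names defined in this class:
    #
    #     vim = FakeVim()
    #     session = vim.Login(None)
    #     vim.SessionIsActive(None, sessionID=session.key,
    #                         userName=session.userName)
    #
    # 'Login' is special-cased in __getattr__ at the end of this class,
    # so it skips _check_session(); every other attribute access checks
    # the session first.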
    def _session_is_active(self, *args, **kwargs):
        try:
            self._check_session()
            return True
        except Exception:
            return False

    def _create_vm(self, method, *args, **kwargs):
        """Creates and registers a VM object with the Host System."""
        config_spec = kwargs.get("config")
        pool = kwargs.get('pool')
        ds = _db_content["Datastore"].keys()[0]
        host = _db_content["HostSystem"].keys()[0]
        vm_dict = {"name": config_spec.name,
                   "ds": [ds],
                   "runtime_host": host,
                   "powerstate": "poweredOff",
                   "vmPathName": config_spec.files.vmPathName,
                   "numCpu": config_spec.numCPUs,
                   "mem": config_spec.memoryMB,
                   "extra_config": config_spec.extraConfig,
                   "virtual_device": config_spec.deviceChange,
                   "instanceUuid": config_spec.instanceUuid}
        virtual_machine = VirtualMachine(**vm_dict)
        _create_object("VirtualMachine", virtual_machine)
        res_pool = _get_object(pool)
        res_pool.vm.ManagedObjectReference.append(virtual_machine.obj)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _reconfig_vm(self, method, *args, **kwargs):
        """Reconfigures a VM and sets the properties supplied."""
        vm_ref = args[0]
        vm_mdo = _get_vm_mdo(vm_ref)
        vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _create_copy_disk(self, method, vmdk_file_path):
        """Creates/copies a vmdk file object in the datastore."""
        # We need to add/create both the .vmdk and -flat.vmdk files
        flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
        _add_file(vmdk_file_path)
        _add_file(flat_vmdk_file_path)
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _extend_disk(self, method, size):
        """Extends the disk size when creating an instance."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _snapshot_vm(self, method):
        """Snapshots a VM. Here we do nothing, for faking's sake."""
        task_mdo = create_task(method, "success")
        return task_mdo.obj

    def _find_all_by_uuid(self, *args, **kwargs):
        uuid = kwargs.get('uuid')
        vm_refs = []
        for vm_ref in _db_content.get("VirtualMachine"):
            vm = _get_object(vm_ref)
            vm_uuid = vm.get("summary.config.instanceUuid")
            if vm_uuid == uuid:
                vm_refs.append(vm_ref)
        return vm_refs

    def _delete_snapshot(self, method, *args, **kwargs):
        """Deletes a VM snapshot.
Here we do nothing for faking sake.""" task_mdo = create_task(method, "success") return task_mdo.obj def _delete_disk(self, method, *args, **kwargs): """Deletes .vmdk and -flat.vmdk files corresponding to the VM.""" vmdk_file_path = kwargs.get("name") flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk") _remove_file(vmdk_file_path) _remove_file(flat_vmdk_file_path) task_mdo = create_task(method, "success") return task_mdo.obj def _delete_file(self, method, *args, **kwargs): """Deletes a file from the datastore.""" _remove_file(kwargs.get("name")) task_mdo = create_task(method, "success") return task_mdo.obj def _just_return(self): """Fakes a return.""" return def _just_return_task(self, method): """Fakes a task return.""" task_mdo = create_task(method, "success") return task_mdo.obj def _clone_vm(self, method, *args, **kwargs): """Fakes a VM clone.""" """Creates and registers a VM object with the Host System.""" source_vmref = args[0] source_vm_mdo = _get_vm_mdo(source_vmref) clone_spec = kwargs.get("spec") ds = _db_content["Datastore"].keys()[0] host = _db_content["HostSystem"].keys()[0] vm_dict = { "name": kwargs.get("name"), "ds": source_vm_mdo.get("datastore"), "runtime_host": source_vm_mdo.get("runtime.host"), "powerstate": source_vm_mdo.get("runtime.powerState"), "vmPathName": source_vm_mdo.get("config.files.vmPathName"), "numCpu": source_vm_mdo.get("summary.config.numCpu"), "mem": source_vm_mdo.get("summary.config.memorySizeMB"), "extra_config": source_vm_mdo.get("config.extraConfig").OptionValue, "virtual_device": source_vm_mdo.get("config.hardware.device"), "instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")} if clone_spec.config is not None: # Impose the config changes specified in the config property if (hasattr(clone_spec.config, 'instanceUuid') and clone_spec.config.instanceUuid is not None): vm_dict["instanceUuid"] = clone_spec.config.instanceUuid if hasattr(clone_spec.config, 'extraConfig'): extraConfigs = _merge_extraconfig(vm_dict["extra_config"], clone_spec.config.extraConfig) vm_dict["extra_config"] = extraConfigs virtual_machine = VirtualMachine(**vm_dict) _create_object("VirtualMachine", virtual_machine) task_mdo = create_task(method, "success") return task_mdo.obj def _unregister_vm(self, method, *args, **kwargs): """Unregisters a VM from the Host System.""" vm_ref = args[0] _get_vm_mdo(vm_ref) del _db_content["VirtualMachine"][vm_ref] def _search_ds(self, method, *args, **kwargs): """Searches the datastore for a file.""" # TODO(garyk): add support for spec parameter ds_path = kwargs.get("datastorePath") if _db_content.get("files", None) is None: raise exception.NoFilesFound() matched_files = set() # Check if we are searching for a file or a directory directory = False dname = '%s/' % ds_path for file in _db_content.get("files"): if file == dname: directory = True break # A directory search implies that we must return all # subdirectories if directory: for file in _db_content.get("files"): if file.find(ds_path) != -1: if not file.endswith(ds_path): path = file.lstrip(dname).split('/') if path: matched_files.add(path[0]) if not matched_files: matched_files.add('/') else: for file in _db_content.get("files"): if file.find(ds_path) != -1: matched_files.add(ds_path) if matched_files: result = DataObject() result.path = ds_path result.file = [] for file in matched_files: matched = DataObject() matched.path = file result.file.append(matched) task_mdo = create_task(method, "success", result=result) else: task_mdo = create_task(method, "error", 
error_fault=FileNotFound()) return task_mdo.obj def _move_file(self, method, *args, **kwargs): source = kwargs.get('sourceName') destination = kwargs.get('destinationName') new_files = [] if source != destination: for file in _db_content.get("files"): if source in file: new_file = file.replace(source, destination) new_files.append(new_file) # if source is not a file then the children will also # be deleted _remove_file(source) for file in new_files: _add_file(file) task_mdo = create_task(method, "success") return task_mdo.obj def _make_dir(self, method, *args, **kwargs): """Creates a directory in the datastore.""" ds_path = kwargs.get("name") if _db_content.get("files", None) is None: raise exception.NoFilesFound() if get_file(ds_path): raise error_util.FileAlreadyExistsException() _db_content["files"].append('%s/' % ds_path) def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"): """Sets power state for the VM.""" if _db_content.get("VirtualMachine", None) is None: raise exception.NotFound(_("No Virtual Machine has been " "registered yet")) if vm_ref not in _db_content.get("VirtualMachine"): raise exception.NotFound(_("Virtual Machine with ref %s is not " "there") % vm_ref) vm_mdo = _db_content.get("VirtualMachine").get(vm_ref) vm_mdo.set("runtime.powerState", pwr_state) task_mdo = create_task(method, "success") return task_mdo.obj def _retrieve_properties_continue(self, method, *args, **kwargs): """Continues the retrieve.""" return FakeRetrieveResult() def _retrieve_properties_cancel(self, method, *args, **kwargs): """Cancels the retrieve.""" return None def _retrieve_properties(self, method, *args, **kwargs): """Retrieves properties based on the type.""" spec_set = kwargs.get("specSet")[0] type = spec_set.propSet[0].type properties = spec_set.propSet[0].pathSet if not isinstance(properties, list): properties = properties.split() objs = spec_set.objectSet lst_ret_objs = FakeRetrieveResult() for obj in objs: try: obj_ref = obj.obj if obj_ref == "RootFolder": # This means that we are retrieving props for all managed # data objects of the specified 'type' in the entire # inventory. This gets invoked by vim_util.get_objects. mdo_refs = _db_content[type] elif obj_ref.type != type: # This means that we are retrieving props for the managed # data objects in the parent object's 'path' property. # This gets invoked by vim_util.get_inner_objects # eg. obj_ref = # type = 'DataStore' # path = 'datastore' # the above will retrieve all datastores in the given # cluster. parent_mdo = _db_content[obj_ref.type][obj_ref] path = obj.selectSet[0].path mdo_refs = parent_mdo.get(path).ManagedObjectReference else: # This means that we are retrieving props of the given # managed data object. This gets invoked by # vim_util.get_properties_for_a_collection_of_objects. 
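                    # e.g. an objectSet entry holding a single
                    # VirtualMachine ref with pathSet
                    # ['runtime.powerState'] lands in this branch.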
                    mdo_refs = [obj_ref]
                for mdo_ref in mdo_refs:
                    mdo = _db_content[type][mdo_ref]
                    prop_list = []
                    for prop_name in properties:
                        prop = Prop(prop_name, mdo.get(prop_name))
                        prop_list.append(prop)
                    obj_content = ObjectContent(mdo.obj, prop_list)
                    lst_ret_objs.add_object(obj_content)
            except Exception as exc:
                LOG.exception(exc)
                continue
        return lst_ret_objs

    def _add_port_group(self, method, *args, **kwargs):
        """Adds a port group to the host system."""
        _host_sk = _db_content["HostSystem"].keys()[0]
        host_mdo = _db_content["HostSystem"][_host_sk]
        host_mdo._add_port_group(kwargs.get("portgrp"))

    def _add_iscsi_send_tgt(self, method, *args, **kwargs):
        """Adds an iSCSI send target to the HBA."""
        send_targets = kwargs.get('targets')
        host_storage_sys = _get_objects('HostStorageSystem').objects[0]
        iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
                                               '.hostBusAdapter')
        iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
        if hasattr(iscsi_hba, 'configuredSendTarget'):
            iscsi_hba.configuredSendTarget.extend(send_targets)
        else:
            iscsi_hba.configuredSendTarget = send_targets

    def __getattr__(self, attr_name):
        if attr_name != "Login":
            self._check_session()
        if attr_name == "Login":
            return lambda *args, **kwargs: self._login()
        elif attr_name == "Logout":
            self._logout()
        elif attr_name == "SessionIsActive":
            return lambda *args, **kwargs: self._session_is_active(
                *args, **kwargs)
        elif attr_name == "TerminateSession":
            return lambda *args, **kwargs: self._terminate_session(
                *args, **kwargs)
        elif attr_name == "CreateVM_Task":
            return lambda *args, **kwargs: self._create_vm(attr_name,
                                                           *args, **kwargs)
        elif attr_name == "ReconfigVM_Task":
            return lambda *args, **kwargs: self._reconfig_vm(attr_name,
                                                             *args, **kwargs)
        elif attr_name == "CreateVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(
                attr_name, kwargs.get("name"))
        elif attr_name == "DeleteDatastoreFile_Task":
            return lambda *args, **kwargs: self._delete_file(attr_name,
                                                             *args, **kwargs)
        elif attr_name == "PowerOnVM_Task":
            return lambda *args, **kwargs: self._set_power_state(
                attr_name, args[0], "poweredOn")
        elif attr_name == "PowerOffVM_Task":
            return lambda *args, **kwargs: self._set_power_state(
                attr_name, args[0], "poweredOff")
        elif attr_name == "RebootGuest":
            return lambda *args, **kwargs: self._just_return()
        elif attr_name == "ResetVM_Task":
            return lambda *args, **kwargs: self._set_power_state(
                attr_name, args[0], "poweredOn")
        elif attr_name == "SuspendVM_Task":
            return lambda *args, **kwargs: self._set_power_state(
                attr_name, args[0], "suspended")
        elif attr_name == "CreateSnapshot_Task":
            return lambda *args, **kwargs: self._snapshot_vm(attr_name)
        elif attr_name == "RemoveSnapshot_Task":
            return lambda *args, **kwargs: self._delete_snapshot(
                attr_name, *args, **kwargs)
        elif attr_name == "CopyVirtualDisk_Task":
            return lambda *args, **kwargs: self._create_copy_disk(
                attr_name, kwargs.get("destName"))
        elif attr_name == "ExtendVirtualDisk_Task":
            return lambda *args, **kwargs: self._extend_disk(
                attr_name, kwargs.get("size"))
        elif attr_name == "Destroy_Task":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                               *args,
                                                               **kwargs)
        elif attr_name == "UnregisterVM":
            return lambda *args, **kwargs: self._unregister_vm(attr_name,
                                                               *args,
                                                               **kwargs)
        elif attr_name == "CloneVM_Task":
            return lambda *args, **kwargs: self._clone_vm(attr_name,
                                                          *args, **kwargs)
        elif attr_name == "FindAllByUuid":
            return lambda *args, **kwargs: self._find_all_by_uuid(
                attr_name, *args, **kwargs)
        elif attr_name == "Rename_Task":
            return lambda *args, **kwargs: self._just_return_task(attr_name)
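        # NOTE: each branch above resolves an attribute lookup to a bound
        # fake handler, so tests can drive this class like a real VIM
        # client stub. A minimal usage sketch, assuming a FakeVim instance
        # `fake_vim` and a registered `vm_ref` (illustrative only):
        #
        #   task_ref = fake_vim.PowerOnVM_Task(vm_ref)
        #   # -> fake task mor; the VM's runtime.powerState is now
        #   #    "poweredOn" in _db_content
        #
        # Names with no matching branch fall through to an implicit None,
        # so calls to unsupported VIM methods fail fast in tests.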
elif attr_name == "SearchDatastore_Task": return lambda *args, **kwargs: self._search_ds(attr_name, *args, **kwargs) elif attr_name == "MoveDatastoreFile_Task": return lambda *args, **kwargs: self._move_file(attr_name, *args, **kwargs) elif attr_name == "MakeDirectory": return lambda *args, **kwargs: self._make_dir(attr_name, *args, **kwargs) elif attr_name == "RetrievePropertiesEx": return lambda *args, **kwargs: self._retrieve_properties( attr_name, *args, **kwargs) elif attr_name == "ContinueRetrievePropertiesEx": return lambda *args, **kwargs: self._retrieve_properties_continue( attr_name, *args, **kwargs) elif attr_name == "CancelRetrievePropertiesEx": return lambda *args, **kwargs: self._retrieve_properties_cancel( attr_name, *args, **kwargs) elif attr_name == "AcquireCloneTicket": return lambda *args, **kwargs: self._just_return() elif attr_name == "AddPortGroup": return lambda *args, **kwargs: self._add_port_group(attr_name, *args, **kwargs) elif attr_name == "RebootHost_Task": return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "ShutdownHost_Task": return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "PowerDownHostToStandBy_Task": return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "PowerUpHostFromStandBy_Task": return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "EnterMaintenanceMode_Task": return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "ExitMaintenanceMode_Task": return lambda *args, **kwargs: self._just_return_task(attr_name) elif attr_name == "AddInternetScsiSendTargets": return lambda *args, **kwargs: self._add_iscsi_send_tgt(attr_name, *args, **kwargs) elif attr_name == "RescanHba": return lambda *args, **kwargs: self._just_return_task(attr_name) nova-2014.1.5/nova/virt/vmwareapi/vmops.py0000664000567000056700000025525012540642544021526 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Class for VM tasks like spawn, snapshot, suspend, resume etc. 
""" import collections import copy import os from oslo.config import cfg from nova.api.metadata import base as instance_metadata from nova import compute from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import units from nova.openstack.common import uuidutils from nova import utils from nova.virt import configdrive from nova.virt import driver from nova.virt.vmwareapi import ds_util from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import imagecache from nova.virt.vmwareapi import vif as vmwarevif from nova.virt.vmwareapi import vim from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import vmware_images vmware_vif_opts = [ cfg.StrOpt('integration_bridge', default='br-int', help='Name of Integration Bridge'), ] vmware_group = cfg.OptGroup(name='vmware', title='VMware Options') CONF = cfg.CONF CONF.register_group(vmware_group) CONF.register_opts(vmware_vif_opts, vmware_group) CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('remove_unused_base_images', 'nova.virt.imagecache') CONF.import_opt('vnc_enabled', 'nova.vnc') CONF.import_opt('my_ip', 'nova.netconf') LOG = logging.getLogger(__name__) VMWARE_POWER_STATES = { 'poweredOff': power_state.SHUTDOWN, 'poweredOn': power_state.RUNNING, 'suspended': power_state.SUSPENDED} VMWARE_LINKED_CLONE = 'vmware_linked_clone' RESIZE_TOTAL_STEPS = 4 DcInfo = collections.namedtuple('DcInfo', ['ref', 'name', 'vmFolder']) class VMwareVMOps(object): """Management class for VM-related tasks.""" def __init__(self, session, virtapi, volumeops, cluster=None, datastore_regex=None): """Initializer.""" self.compute_api = compute.API() self._session = session self._virtapi = virtapi self._volumeops = volumeops self._cluster = cluster self._datastore_regex = datastore_regex # Ensure that the base folder is unique per compute node if CONF.remove_unused_base_images: self._base_folder = '%s%s' % (CONF.my_ip, CONF.image_cache_subdirectory_name) else: # Aging disable ensures backward compatibility self._base_folder = CONF.image_cache_subdirectory_name self._tmp_folder = 'vmware_temp' self._default_root_device = 'vda' self._rescue_suffix = '-rescue' self._migrate_suffix = '-orig' self._poll_rescue_last_ran = None self._is_neutron = utils.is_neutron() self._datastore_dc_mapping = {} self._datastore_browser_mapping = {} self._imagecache = imagecache.ImageCacheManager(self._session, self._base_folder) def list_instances(self): """Lists the VM instances that are registered with the ESX host.""" LOG.debug(_("Getting list of instances")) vms = self._session._call_method(vim_util, "get_objects", "VirtualMachine", ["name", "runtime.connectionState"]) lst_vm_names = self._get_valid_vms_from_retrieve_result(vms) LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names))) return lst_vm_names def _extend_virtual_disk(self, instance, requested_size, name, dc_ref): service_content = self._session._get_vim().get_service_content() LOG.debug(_("Extending root virtual disk to %s"), requested_size) vmdk_extend_task = self._session._call_method( self._session._get_vim(), "ExtendVirtualDisk_Task", 
service_content.virtualDiskManager, name=name, datacenter=dc_ref, newCapacityKb=requested_size, eagerZero=False) try: self._session._wait_for_task(vmdk_extend_task) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_('Extending virtual disk failed with error: %s'), e, instance=instance) # Clean up files created during the extend operation files = [name.replace(".vmdk", "-flat.vmdk"), name] for file in files: self._delete_datastore_file(instance, file, dc_ref) LOG.debug(_("Extended root virtual disk")) def _delete_datastore_file(self, instance, datastore_path, dc_ref): try: ds_util.file_delete(self._session, datastore_path, dc_ref) except (error_util.CannotDeleteFileException, error_util.FileFaultException, error_util.FileLockedException, error_util.FileNotFoundException) as e: LOG.debug(_("Unable to delete %(ds)s. There may be more than " "one process or thread that tries to delete the file. " "Exception: %(ex)s"), {'ds': datastore_path, 'ex': e}) def _get_vmdk_path(self, ds_name, folder, name): path = "%s/%s.vmdk" % (folder, name) return ds_util.build_datastore_path(ds_name, path) def _get_disk_format(self, image_meta): disk_format = image_meta.get('disk_format') if disk_format not in ['iso', 'vmdk', None]: raise exception.InvalidDiskFormat(disk_format=disk_format) return (disk_format, disk_format == 'iso') def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None, instance_name=None, power_on=True): """Creates a VM instance. Steps followed are: 1. Create a VM with no disk and the specifics in the instance object like RAM size. 2. For flat disk 2.1. Create a dummy vmdk of the size of the disk file that is to be uploaded. This is required just to create the metadata file. 2.2. Delete the -flat.vmdk file created in the above step and retain the metadata .vmdk file. 2.3. Upload the disk file. 3. For sparse disk 3.1. Upload the disk file to a -sparse.vmdk file. 3.2. Copy/Clone the -sparse.vmdk file to a thin vmdk. 3.3. Delete the -sparse.vmdk file. 4. Attach the disk to the VM by reconfiguring the same. 5. Power on the VM. """ ebs_root = False if block_device_info: msg = "Block device information present: %s" % block_device_info # NOTE(mriedem): block_device_info can contain an auth_password # so we have to scrub the message before logging it. LOG.debug(logging.mask_password(msg), instance=instance) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) if block_device_mapping: ebs_root = True (file_type, is_iso) = self._get_disk_format(image_meta) client_factory = self._session._get_vim().client.factory service_content = self._session._get_vim().get_service_content() ds = vm_util.get_datastore_ref_and_name(self._session, self._cluster, datastore_regex=self._datastore_regex) data_store_ref = ds[0] data_store_name = ds[1] dc_info = self.get_datacenter_ref_and_name(data_store_ref) #TODO(hartsocks): this pattern is confusing, reimplement as methods # The use of nested functions in this file makes for a confusing and # hard to maintain file. At some future date, refactor this method to # be a full-fledged method. This will also make unit testing easier. def _get_image_properties(root_size): """Get the Size of the flat vmdk file that is there on the storage repository. 
""" image_ref = instance.get('image_ref') if image_ref: _image_info = vmware_images.get_vmdk_size_and_properties( context, image_ref, instance) else: # The case that the image may be booted from a volume _image_info = (root_size, {}) image_size, image_properties = _image_info vmdk_file_size_in_kb = int(image_size) / 1024 os_type = image_properties.get("vmware_ostype", "otherGuest") adapter_type = image_properties.get("vmware_adaptertype", "lsiLogic") disk_type = image_properties.get("vmware_disktype", "preallocated") # Get the network card type from the image properties. vif_model = image_properties.get("hw_vif_model", "VirtualE1000") # Fetch the image_linked_clone data here. It is retrieved # with the above network based API call. To retrieve it # later will necessitate additional network calls using the # identical method. Consider this a cache. image_linked_clone = image_properties.get(VMWARE_LINKED_CLONE) return (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model, image_linked_clone) root_gb = instance['root_gb'] root_gb_in_kb = root_gb * units.Mi (vmdk_file_size_in_kb, os_type, adapter_type, disk_type, vif_model, image_linked_clone) = _get_image_properties(root_gb_in_kb) if root_gb_in_kb and vmdk_file_size_in_kb > root_gb_in_kb: reason = _("Image disk size greater than requested disk size") raise exception.InstanceUnacceptable(instance_id=instance['uuid'], reason=reason) node_mo_id = vm_util.get_mo_id_from_instance(instance) res_pool_ref = vm_util.get_res_pool_ref(self._session, self._cluster, node_mo_id) def _get_vif_infos(): vif_infos = [] if network_info is None: return vif_infos for vif in network_info: mac_address = vif['address'] network_name = vif['network']['bridge'] or \ CONF.vmware.integration_bridge network_ref = vmwarevif.get_network_ref(self._session, self._cluster, vif, self._is_neutron) vif_infos.append({'network_name': network_name, 'mac_address': mac_address, 'network_ref': network_ref, 'iface_id': vif['id'], 'vif_model': vif_model }) return vif_infos vif_infos = _get_vif_infos() # Get the instance name. In some cases this may differ from the 'uuid', # for example when the spawn of a rescue instance takes place. if not instance_name: instance_name = instance['uuid'] # Get the create vm config spec config_spec = vm_util.get_vm_create_spec( client_factory, instance, instance_name, data_store_name, vif_infos, os_type) def _execute_create_vm(): """Create VM on ESX host.""" LOG.debug(_("Creating VM on the ESX host"), instance=instance) # Create the VM on the ESX host vm_create_task = self._session._call_method( self._session._get_vim(), "CreateVM_Task", dc_info.vmFolder, config=config_spec, pool=res_pool_ref) self._session._wait_for_task(vm_create_task) LOG.debug(_("Created VM on the ESX host"), instance=instance) _execute_create_vm() # In the case of a rescue disk the instance_name is not the same as # instance UUID. In this case the VM reference is accessed via the # instance name. 
if instance_name != instance['uuid']: vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name) else: vm_ref = vm_util.get_vm_ref(self._session, instance) # Set the machine.id parameter of the instance to inject # the NIC configuration inside the VM if CONF.flat_injected: self._set_machine_id(client_factory, instance, network_info) # Set the vnc configuration of the instance, vnc port starts from 5900 if CONF.vnc_enabled: self._get_and_set_vnc_config(client_factory, instance) def _create_virtual_disk(virtual_disk_path, file_size_in_kb): """Create a virtual disk of the size of flat vmdk file.""" # Create a Virtual Disk of the size of the flat vmdk file. This is # done just to generate the meta-data file whose specifics # depend on the size of the disk, thin/thick provisioning and the # storage adapter type. # Here we assume thick provisioning and lsiLogic for the adapter # type LOG.debug(_("Creating Virtual Disk of size " "%(vmdk_file_size_in_kb)s KB and adapter type " "%(adapter_type)s on the ESX host local store " "%(data_store_name)s"), {"vmdk_file_size_in_kb": file_size_in_kb, "adapter_type": adapter_type, "data_store_name": data_store_name}, instance=instance) vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory, file_size_in_kb, adapter_type, disk_type) vmdk_create_task = self._session._call_method( self._session._get_vim(), "CreateVirtualDisk_Task", service_content.virtualDiskManager, name=virtual_disk_path, datacenter=dc_info.ref, spec=vmdk_create_spec) self._session._wait_for_task(vmdk_create_task) LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s" " KB and type %(disk_type)s on " "the ESX host local store %(data_store_name)s") % {"vmdk_file_size_in_kb": vmdk_file_size_in_kb, "disk_type": disk_type, "data_store_name": data_store_name}, instance=instance) def _fetch_image_on_datastore(upload_name): """Fetch image from Glance to datastore.""" LOG.debug(_("Downloading image file data %(image_ref)s to the " "data store %(data_store_name)s") % {'image_ref': instance['image_ref'], 'data_store_name': data_store_name}, instance=instance) vmware_images.fetch_image( context, instance['image_ref'], instance, host=self._session._host_ip, data_center_name=dc_info.name, datastore_name=data_store_name, cookies=cookies, file_path=upload_name) LOG.debug(_("Downloaded image file data %(image_ref)s to " "%(upload_name)s on the data store " "%(data_store_name)s") % {'image_ref': instance['image_ref'], 'upload_name': upload_name, 'data_store_name': data_store_name}, instance=instance) def _copy_virtual_disk(source, dest): """Copy a sparse virtual disk to a thin virtual disk.""" # Copy a sparse virtual disk to a thin virtual disk. This is also # done to generate the meta-data file whose specifics # depend on the size of the disk, thin/thick provisioning and the # storage adapter type. 
LOG.debug(_("Copying Virtual Disk of size " "%(vmdk_file_size_in_kb)s KB and adapter type " "%(adapter_type)s on the ESX host local store " "%(data_store_name)s to disk type %(disk_type)s") % {"vmdk_file_size_in_kb": vmdk_file_size_in_kb, "adapter_type": adapter_type, "data_store_name": data_store_name, "disk_type": disk_type}, instance=instance) vmdk_copy_spec = self.get_copy_virtual_disk_spec(client_factory, adapter_type, disk_type) vmdk_copy_task = self._session._call_method( self._session._get_vim(), "CopyVirtualDisk_Task", service_content.virtualDiskManager, sourceName=source, sourceDatacenter=dc_info.ref, destName=dest, destSpec=vmdk_copy_spec) self._session._wait_for_task(vmdk_copy_task) LOG.debug(_("Copied Virtual Disk of size %(vmdk_file_size_in_kb)s" " KB and type %(disk_type)s on " "the ESX host local store %(data_store_name)s") % {"vmdk_file_size_in_kb": vmdk_file_size_in_kb, "disk_type": disk_type, "data_store_name": data_store_name}, instance=instance) if not ebs_root: # this logic allows for instances or images to decide # for themselves which strategy is best for them. linked_clone = VMwareVMOps.decide_linked_clone( image_linked_clone, CONF.vmware.use_linked_clone ) upload_name = instance['image_ref'] upload_folder = '%s/%s' % (self._base_folder, upload_name) # The vmdk meta-data file uploaded_file_name = "%s/%s.%s" % (upload_folder, upload_name, file_type) uploaded_file_path = ds_util.build_datastore_path(data_store_name, uploaded_file_name) session_vim = self._session._get_vim() cookies = session_vim.client.options.transport.cookiejar ds_browser = self._get_ds_browser(data_store_ref) upload_file_name = upload_name + ".%s" % file_type # Check if the timestamp file exists - if so then delete it. This # will ensure that the aging will not delete a cache image if it # is going to be used now. if CONF.remove_unused_base_images: ds_path = ds_util.build_datastore_path(data_store_name, self._base_folder) path = self._imagecache.timestamp_folder_get(ds_path, upload_name) # Lock to ensure that the spawn will not try and access a image # that is currently being deleted on the datastore. with lockutils.lock(path, lock_file_prefix='nova-vmware-ts', external=True): self._imagecache.timestamp_cleanup(dc_info.ref, ds_browser, data_store_ref, data_store_name, path) # Check if the image exists in the datastore cache. If not the # image will be uploaded and cached. if not (self._check_if_folder_file_exists(ds_browser, data_store_ref, data_store_name, upload_folder, upload_file_name)): # Upload will be done to the self._tmp_folder and then moved # to the self._base_folder tmp_upload_folder = '%s/%s' % (self._tmp_folder, uuidutils.generate_uuid()) upload_folder = '%s/%s' % (tmp_upload_folder, upload_name) # Naming the VM files in correspondence with the VM instance # The flat vmdk file name flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % ( upload_folder, upload_name) # The sparse vmdk file name for sparse disk image sparse_uploaded_vmdk_name = "%s/%s-sparse.vmdk" % ( upload_folder, upload_name) flat_uploaded_vmdk_path = ds_util.build_datastore_path( data_store_name, flat_uploaded_vmdk_name) sparse_uploaded_vmdk_path = ds_util.build_datastore_path( data_store_name, sparse_uploaded_vmdk_name) upload_file_name = "%s/%s.%s" % (upload_folder, upload_name, file_type) upload_path = ds_util.build_datastore_path(data_store_name, upload_file_name) if not is_iso: if disk_type != "sparse": # Create a flat virtual disk and retain the metadata # file. This will be done in the unique temporary # directory. 
                        ds_util.mkdir(self._session,
                                      ds_util.build_datastore_path(
                                          data_store_name, upload_folder),
                                      dc_info.ref)
                        _create_virtual_disk(upload_path,
                                             vmdk_file_size_in_kb)
                        self._delete_datastore_file(instance,
                                                    flat_uploaded_vmdk_path,
                                                    dc_info.ref)
                        upload_file_name = flat_uploaded_vmdk_name
                    else:
                        upload_file_name = sparse_uploaded_vmdk_name

                _fetch_image_on_datastore(upload_file_name)

                if not is_iso and disk_type == "sparse":
                    # Copy the sparse virtual disk to a thin virtual disk.
                    disk_type = "thin"
                    _copy_virtual_disk(sparse_uploaded_vmdk_path,
                                       upload_path)
                    self._delete_datastore_file(instance,
                                                sparse_uploaded_vmdk_path,
                                                dc_info.ref)

                base_folder = '%s/%s' % (self._base_folder, upload_name)
                dest_folder = ds_util.build_datastore_path(data_store_name,
                                                           base_folder)
                src_folder = ds_util.build_datastore_path(data_store_name,
                                                          upload_folder)
                try:
                    ds_util.file_move(self._session, dc_info.ref,
                                      src_folder, dest_folder)
                except error_util.FileAlreadyExistsException:
                    # File move has failed. This may be due to the fact
                    # that a process or thread has already completed the
                    # operation. In the event of a FileAlreadyExists we
                    # continue, all other exceptions will be raised.
                    LOG.debug(_("File %s already exists"), dest_folder)

                # Delete the temp upload folder
                self._delete_datastore_file(
                    instance,
                    ds_util.build_datastore_path(data_store_name,
                                                 tmp_upload_folder),
                    dc_info.ref)
            else:
                # linked clone base disk exists
                if disk_type == "sparse":
                    disk_type = "thin"

            if is_iso:
                if root_gb_in_kb:
                    dest_vmdk_path = self._get_vmdk_path(data_store_name,
                                                         instance['uuid'],
                                                         instance_name)
                    # Create the blank virtual disk for the VM
                    _create_virtual_disk(dest_vmdk_path, root_gb_in_kb)
                    root_vmdk_path = dest_vmdk_path
                else:
                    root_vmdk_path = None
            else:
                # Extend the disk size if necessary
                if not linked_clone:
                    # If we are not using linked_clone, copy the image from
                    # the cache into the instance directory. If we are
                    # using linked clone it is referenced from the cache
                    # directory.
                    dest_vmdk_path = self._get_vmdk_path(data_store_name,
                                                         instance_name,
                                                         instance_name)
                    _copy_virtual_disk(uploaded_file_path, dest_vmdk_path)

                    root_vmdk_path = dest_vmdk_path
                    if root_gb_in_kb > vmdk_file_size_in_kb:
                        self._extend_virtual_disk(instance, root_gb_in_kb,
                                                  root_vmdk_path,
                                                  dc_info.ref)
                else:
                    upload_folder = '%s/%s' % (self._base_folder,
                                               upload_name)
                    if root_gb:
                        root_vmdk_name = "%s/%s.%s.vmdk" % (upload_folder,
                                                            upload_name,
                                                            root_gb)
                    else:
                        root_vmdk_name = "%s/%s.vmdk" % (upload_folder,
                                                         upload_name)
                    root_vmdk_path = ds_util.build_datastore_path(
                        data_store_name, root_vmdk_name)
                    # Ensure only a single thread extends the image at
                    # once. We do this by taking a lock on the name of the
                    # extended image. This allows multiple threads to
                    # create resized copies simultaneously, as long as
                    # they are different sizes. Threads attempting to
                    # create the same resized copy will be serialized,
                    # with only the first actually creating the copy.
                    #
                    # Note that the object is in a per-nova cache
                    # directory, so inter-nova locking is not a concern.
                    # Consequently we can safely use simple thread locks.
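                    # NOTE: a sketch of how the lock name serializes work,
                    # with hypothetical sizes (illustrative only): two
                    # threads resizing the same image to 20Gb both contend
                    # on
                    #
                    #   lockutils.lock('.../IREF.20.vmdk', ...)
                    #
                    # while a thread producing a 40Gb copy locks
                    # '.../IREF.40.vmdk' and proceeds in parallel.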
with lockutils.lock(root_vmdk_path, lock_file_prefix='nova-vmware-image'): if not self._check_if_folder_file_exists( ds_browser, data_store_ref, data_store_name, upload_folder, upload_name + ".%s.vmdk" % root_gb): LOG.debug("Copying root disk of size %sGb", root_gb) # Create a copy of the base image, ensuring we # clean up on failure try: _copy_virtual_disk(uploaded_file_path, root_vmdk_path) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_('Failed to copy cached ' 'image %(source)s to ' '%(dest)s for resize: ' '%(error)s'), {'source': uploaded_file_path, 'dest': root_vmdk_path, 'error': e.message}) try: ds_util.file_delete(self._session, root_vmdk_path, dc_info.ref) except error_util.FileNotFoundException: # File was never created: cleanup not # required pass # Resize the copy to the appropriate size. No need # for cleanup up here, as _extend_virtual_disk # already does it if root_gb_in_kb > vmdk_file_size_in_kb: self._extend_virtual_disk(instance, root_gb_in_kb, root_vmdk_path, dc_info.ref) # Attach the root disk to the VM. if root_vmdk_path: self._volumeops.attach_disk_to_vm( vm_ref, instance, adapter_type, disk_type, root_vmdk_path, root_gb_in_kb, linked_clone) if is_iso: self._attach_cdrom_to_vm( vm_ref, instance, data_store_ref, uploaded_file_path) if configdrive.required_by(instance): uploaded_iso_path = self._create_config_drive(instance, injected_files, admin_password, data_store_name, dc_info.name, instance['uuid'], cookies) uploaded_iso_path = ds_util.build_datastore_path( data_store_name, uploaded_iso_path) self._attach_cdrom_to_vm( vm_ref, instance, data_store_ref, uploaded_iso_path) else: # Attach the root disk to the VM. for root_disk in block_device_mapping: connection_info = root_disk['connection_info'] self._volumeops.attach_root_volume(connection_info, instance, self._default_root_device, data_store_ref) def _power_on_vm(): """Power on the VM.""" LOG.debug(_("Powering on the VM instance"), instance=instance) # Power On the VM power_on_task = self._session._call_method( self._session._get_vim(), "PowerOnVM_Task", vm_ref) self._session._wait_for_task(power_on_task) LOG.debug(_("Powered on the VM instance"), instance=instance) if power_on: _power_on_vm() def _create_config_drive(self, instance, injected_files, admin_password, data_store_name, dc_name, upload_folder, cookies): if CONF.config_drive_format != 'iso9660': reason = (_('Invalid config_drive_format "%s"') % CONF.config_drive_format) raise exception.InstancePowerOnFailure(reason=reason) LOG.info(_('Using config drive for instance'), instance=instance) extra_md = {} if admin_password: extra_md['admin_pass'] = admin_password inst_md = instance_metadata.InstanceMetadata(instance, content=injected_files, extra_md=extra_md) try: with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: with utils.tempdir() as tmp_path: tmp_file = os.path.join(tmp_path, 'configdrive.iso') cdb.make_drive(tmp_file) upload_iso_path = "%s/configdrive.iso" % ( upload_folder) vmware_images.upload_iso_to_datastore( tmp_file, instance, host=self._session._host_ip, data_center_name=dc_name, datastore_name=data_store_name, cookies=cookies, file_path=upload_iso_path) return upload_iso_path except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_('Creating config drive failed with error: %s'), e, instance=instance) def _attach_cdrom_to_vm(self, vm_ref, instance, datastore, file_path): """Attach cdrom to VM by reconfiguration.""" instance_name = instance['name'] instance_uuid = 
instance['uuid'] client_factory = self._session._get_vim().client.factory devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( client_factory, devices, 'ide') cdrom_attach_config_spec = vm_util.get_cdrom_attach_config_spec( client_factory, datastore, file_path, controller_key, unit_number) if controller_spec: cdrom_attach_config_spec.deviceChange.append(controller_spec) LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach " "cdrom %(file_path)s"), {'instance_name': instance_name, 'file_path': file_path}) reconfig_task = self._session._call_method( self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=cdrom_attach_config_spec) self._session._wait_for_task(reconfig_task) LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach " "cdrom %(file_path)s"), {'instance_name': instance_name, 'file_path': file_path}) @staticmethod def decide_linked_clone(image_linked_clone, global_linked_clone): """Explicit decision logic: whether to use linked clone on a vmdk. This is *override* logic not boolean logic. 1. let the image over-ride if set at all 2. default to the global setting In math terms, I need to allow: glance image to override global config. That is g vs c. "g" for glance. "c" for Config. So, I need g=True vs c=False to be True. And, I need g=False vs c=True to be False. And, I need g=None vs c=True to be True. Some images maybe independently best tuned for use_linked_clone=True saving datastorage space. Alternatively a whole OpenStack install may be tuned to performance use_linked_clone=False but a single image in this environment may be best configured to save storage space and set use_linked_clone=True only for itself. The point is: let each layer of control override the layer beneath it. rationale: For technical discussion on the clone strategies and their trade-offs see: https://www.vmware.com/support/ws5/doc/ws_clone_typeofclone.html :param image_linked_clone: boolean or string or None :param global_linked_clone: boolean or string or None :return: Boolean """ value = None # Consider the values in order of override. if image_linked_clone is not None: value = image_linked_clone else: # this will never be not-set by this point. 
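            # NOTE: worked examples of the override logic (g = glance
            # image value, c = global config), following from the branch
            # above plus the fallback and coercion below:
            #
            #   decide_linked_clone(None, True)     -> True   (c wins)
            #   decide_linked_clone(False, True)    -> False  (g wins)
            #   decide_linked_clone("true", False)  -> True   (strings OK)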
value = global_linked_clone return strutils.bool_from_string(value) def get_copy_virtual_disk_spec(self, client_factory, adapter_type, disk_type): return vm_util.get_copy_virtual_disk_spec(client_factory, adapter_type, disk_type) def _create_vm_snapshot(self, instance, vm_ref): LOG.debug(_("Creating Snapshot of the VM instance"), instance=instance) snapshot_task = self._session._call_method( self._session._get_vim(), "CreateSnapshot_Task", vm_ref, name="%s-snapshot" % instance['uuid'], description="Taking Snapshot of the VM", memory=False, quiesce=True) self._session._wait_for_task(snapshot_task) LOG.debug(_("Created Snapshot of the VM instance"), instance=instance) task_info = self._session._call_method(vim_util, "get_dynamic_property", snapshot_task, "Task", "info") snapshot = task_info.result return snapshot def _delete_vm_snapshot(self, instance, vm_ref, snapshot): LOG.debug(_("Deleting Snapshot of the VM instance"), instance=instance) delete_snapshot_task = self._session._call_method( self._session._get_vim(), "RemoveSnapshot_Task", snapshot, removeChildren=False, consolidate=True) self._session._wait_for_task(delete_snapshot_task) LOG.debug(_("Deleted Snapshot of the VM instance"), instance=instance) def snapshot(self, context, instance, image_id, update_task_state): """Create snapshot from a running VM instance. Steps followed are: 1. Get the name of the vmdk file which the VM points to right now. Can be a chain of snapshots, so we need to know the last in the chain. 2. Create the snapshot. A new vmdk is created which the VM points to now. The earlier vmdk becomes read-only. 3. Call CopyVirtualDisk which coalesces the disk chain to form a single vmdk, rather a .vmdk metadata file and a -flat.vmdk disk data file. 4. Now upload the -flat.vmdk file to the image store. 5. Delete the coalesced .vmdk and -flat.vmdk created. """ vm_ref = vm_util.get_vm_ref(self._session, instance) client_factory = self._session._get_vim().client.factory service_content = self._session._get_vim().get_service_content() def _get_vm_and_vmdk_attribs(): # Get the vmdk file name that the VM is pointing to hw_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") (vmdk_file_path_before_snapshot, adapter_type, disk_type) = vm_util.get_vmdk_path_and_adapter_type( hw_devices, uuid=instance['uuid']) if not vmdk_file_path_before_snapshot: LOG.debug("No root disk defined. 
Unable to snapshot.") raise error_util.NoRootDiskDefined() datastore_name = ds_util.split_datastore_path( vmdk_file_path_before_snapshot)[0] os_type = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "summary.config.guestId") return (vmdk_file_path_before_snapshot, adapter_type, disk_type, datastore_name, os_type) (vmdk_file_path_before_snapshot, adapter_type, disk_type, datastore_name, os_type) = _get_vm_and_vmdk_attribs() snapshot = self._create_vm_snapshot(instance, vm_ref) update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) def _check_if_tmp_folder_exists(): # Copy the contents of the VM that were there just before the # snapshot was taken ds_ref_ret = self._session._call_method( vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "datastore") if ds_ref_ret is None: raise exception.DatastoreNotFound() ds_ref = ds_ref_ret.ManagedObjectReference[0] self.check_temp_folder(datastore_name, ds_ref) return ds_ref ds_ref = _check_if_tmp_folder_exists() # Generate a random vmdk file name to which the coalesced vmdk content # will be copied to. A random name is chosen so that we don't have # name clashes. random_name = uuidutils.generate_uuid() dest_vmdk_file_path = ds_util.build_datastore_path(datastore_name, "%s/%s.vmdk" % (self._tmp_folder, random_name)) dest_vmdk_data_file_path = ds_util.build_datastore_path(datastore_name, "%s/%s-flat.vmdk" % (self._tmp_folder, random_name)) dc_info = self.get_datacenter_ref_and_name(ds_ref) def _copy_vmdk_content(): # Consolidate the snapshotted disk to a temporary vmdk. copy_spec = self.get_copy_virtual_disk_spec(client_factory, adapter_type, disk_type) LOG.debug(_('Copying snapshotted disk %s.'), vmdk_file_path_before_snapshot, instance=instance) copy_disk_task = self._session._call_method( self._session._get_vim(), "CopyVirtualDisk_Task", service_content.virtualDiskManager, sourceName=vmdk_file_path_before_snapshot, sourceDatacenter=dc_info.ref, destName=dest_vmdk_file_path, destDatacenter=dc_info.ref, destSpec=copy_spec, force=False) self._session._wait_for_task(copy_disk_task) LOG.debug(_('Copied snapshotted disk %s.'), vmdk_file_path_before_snapshot, instance=instance) _copy_vmdk_content() # Note(vui): handle snapshot cleanup on exceptions. self._delete_vm_snapshot(instance, vm_ref, snapshot) cookies = self._session._get_vim().client.options.transport.cookiejar def _upload_vmdk_to_image_repository(): # Upload the contents of -flat.vmdk file which has the disk data. LOG.debug(_("Uploading image %s") % image_id, instance=instance) vmware_images.upload_image( context, image_id, instance, os_type=os_type, disk_type="preallocated", adapter_type=adapter_type, image_version=1, host=self._session._host_ip, data_center_name=dc_info.name, datastore_name=datastore_name, cookies=cookies, file_path="%s/%s-flat.vmdk" % (self._tmp_folder, random_name)) LOG.debug(_("Uploaded image %s") % image_id, instance=instance) update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD) _upload_vmdk_to_image_repository() def _clean_temp_data(): """Delete temporary vmdk files generated in image handling operations. """ # The data file is the one occupying space, and likelier to see # deletion problems, so prioritize its deletion first. In the # unlikely event that its deletion fails, the small descriptor file # is retained too by design since it makes little sense to remove # it when the data disk it refers to still lingers. 
for f in dest_vmdk_data_file_path, dest_vmdk_file_path: self._delete_datastore_file(instance, f, dc_info.ref) _clean_temp_data() def _get_values_from_object_properties(self, props, query): while props: token = vm_util._get_token(props) for elem in props.objects: for prop in elem.propSet: for key in query.keys(): if prop.name == key: query[key] = prop.val break if token: props = self._session._call_method(vim_util, "continue_to_get_objects", token) else: break def reboot(self, instance, network_info): """Reboot a VM instance.""" vm_ref = vm_util.get_vm_ref(self._session, instance) lst_properties = ["summary.guest.toolsStatus", "runtime.powerState", "summary.guest.toolsRunningStatus"] props = self._session._call_method(vim_util, "get_object_properties", None, vm_ref, "VirtualMachine", lst_properties) query = {'runtime.powerState': None, 'summary.guest.toolsStatus': None, 'summary.guest.toolsRunningStatus': False} self._get_values_from_object_properties(props, query) pwr_state = query['runtime.powerState'] tools_status = query['summary.guest.toolsStatus'] tools_running_status = query['summary.guest.toolsRunningStatus'] # Raise an exception if the VM is not powered On. if pwr_state not in ["poweredOn"]: reason = _("instance is not powered on") raise exception.InstanceRebootFailure(reason=reason) # If latest vmware tools are installed in the VM, and that the tools # are running, then only do a guest reboot. Otherwise do a hard reset. if (tools_status == "toolsOk" and tools_running_status == "guestToolsRunning"): LOG.debug(_("Rebooting guest OS of VM"), instance=instance) self._session._call_method(self._session._get_vim(), "RebootGuest", vm_ref) LOG.debug(_("Rebooted guest OS of VM"), instance=instance) else: LOG.debug(_("Doing hard reboot of VM"), instance=instance) reset_task = self._session._call_method(self._session._get_vim(), "ResetVM_Task", vm_ref) self._session._wait_for_task(reset_task) LOG.debug(_("Did hard reboot of VM"), instance=instance) def _delete(self, instance, network_info): """Destroy a VM instance. Steps followed are: 1. Power off the VM, if it is in poweredOn state. 2. Destroy the VM. """ try: vm_ref = vm_util.get_vm_ref(self._session, instance) self.power_off(instance) try: LOG.debug(_("Destroying the VM"), instance=instance) destroy_task = self._session._call_method( self._session._get_vim(), "Destroy_Task", vm_ref) self._session._wait_for_task(destroy_task) LOG.debug(_("Destroyed the VM"), instance=instance) except Exception as excep: LOG.warn(_("In vmwareapi:vmops:delete, got this exception" " while destroying the VM: %s") % str(excep)) except Exception as exc: LOG.exception(exc, instance=instance) def _destroy_instance(self, instance, network_info, destroy_disks=True, instance_name=None): # Destroy a VM instance # Get the instance name. In some cases this may differ from the 'uuid', # for example when the spawn of a rescue instance takes place. 
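        # NOTE: _get_values_from_object_properties() above fills a
        # caller-supplied dict in place, paging through results via
        # continue_to_get_objects. A minimal usage sketch (illustrative
        # only):
        #
        #   query = {'runtime.powerState': None}
        #   self._get_values_from_object_properties(props, query)
        #   pwr_state = query['runtime.powerState']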
if not instance_name: instance_name = instance['uuid'] try: vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name) if vm_ref is None: LOG.warning(_('Instance does not exist on backend'), instance=instance) return lst_properties = ["config.files.vmPathName", "runtime.powerState", "datastore"] props = self._session._call_method(vim_util, "get_object_properties", None, vm_ref, "VirtualMachine", lst_properties) query = {'runtime.powerState': None, 'config.files.vmPathName': None, 'datastore': None} self._get_values_from_object_properties(props, query) pwr_state = query['runtime.powerState'] vm_config_pathname = query['config.files.vmPathName'] datastore_name = None if vm_config_pathname: _ds_path = ds_util.split_datastore_path(vm_config_pathname) datastore_name, vmx_file_path = _ds_path # Power off the VM if it is in PoweredOn state. if pwr_state == "poweredOn": LOG.debug(_("Powering off the VM"), instance=instance) poweroff_task = self._session._call_method( self._session._get_vim(), "PowerOffVM_Task", vm_ref) self._session._wait_for_task(poweroff_task) LOG.debug(_("Powered off the VM"), instance=instance) # Un-register the VM try: LOG.debug(_("Unregistering the VM"), instance=instance) self._session._call_method(self._session._get_vim(), "UnregisterVM", vm_ref) LOG.debug(_("Unregistered the VM"), instance=instance) except Exception as excep: LOG.warn(_("In vmwareapi:vmops:_destroy_instance, got this " "exception while un-registering the VM: %s"), excep) # Delete the folder holding the VM related content on # the datastore. if destroy_disks and datastore_name: try: dir_ds_compliant_path = ds_util.build_datastore_path( datastore_name, os.path.dirname(vmx_file_path)) LOG.debug(_("Deleting contents of the VM from " "datastore %(datastore_name)s") % {'datastore_name': datastore_name}, instance=instance) ds_ref_ret = query['datastore'] ds_ref = ds_ref_ret.ManagedObjectReference[0] dc_info = self.get_datacenter_ref_and_name(ds_ref) ds_util.file_delete(self._session, dir_ds_compliant_path, dc_info.ref) LOG.debug(_("Deleted contents of the VM from " "datastore %(datastore_name)s") % {'datastore_name': datastore_name}, instance=instance) except Exception as excep: LOG.warn(_("In vmwareapi:vmops:_destroy_instance, " "got this exception while deleting " "the VM contents from the disk: %s"), excep) except Exception as exc: LOG.exception(exc, instance=instance) finally: vm_util.vm_ref_cache_delete(instance_name) def destroy(self, instance, network_info, destroy_disks=True): """Destroy a VM instance. Steps followed for each VM are: 1. Power off, if it is in poweredOn state. 2. Un-register. 3. Delete the contents of the folder holding the VM related data. """ # If there is a rescue VM then we need to destroy that one too. LOG.debug(_("Destroying instance"), instance=instance) if instance['vm_state'] == vm_states.RESCUED: LOG.debug(_("Rescue VM configured"), instance=instance) try: self.unrescue(instance, power_on=False) LOG.debug(_("Rescue VM destroyed"), instance=instance) except Exception: rescue_name = instance['uuid'] + self._rescue_suffix self._destroy_instance(instance, network_info, destroy_disks=destroy_disks, instance_name=rescue_name) # NOTE(arnaud): Destroy uuid-orig and uuid VMs iff it is not # triggered by the revert resize api call. This prevents # the uuid-orig VM to be deleted to be able to associate it later. 
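        # NOTE: the fixed naming scheme makes these lookups possible. For
        # an instance with uuid U, the driver may have registered up to
        # three VMs (suffixes defined in __init__):
        #
        #   U             - the primary VM
        #   U + '-orig'   - the pre-resize original (_migrate_suffix)
        #   U + '-rescue' - the rescue VM (_rescue_suffix)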
if instance['task_state'] != task_states.RESIZE_REVERTING: # When VM deletion is triggered in middle of VM resize before VM # arrive RESIZED state, uuid-orig VM need to deleted to avoid # VM leak. Within method _destroy_instance it will check vmref # exist or not before attempt deletion. resize_orig_vmname = instance['uuid'] + self._migrate_suffix vm_orig_ref = vm_util.get_vm_ref_from_name(self._session, resize_orig_vmname) if vm_orig_ref: self._destroy_instance(instance, network_info, destroy_disks=destroy_disks, instance_name=resize_orig_vmname) self._destroy_instance(instance, network_info, destroy_disks=destroy_disks) LOG.debug(_("Instance destroyed"), instance=instance) def pause(self, instance): msg = _("pause not supported for vmwareapi") raise NotImplementedError(msg) def unpause(self, instance): msg = _("unpause not supported for vmwareapi") raise NotImplementedError(msg) def suspend(self, instance): """Suspend the specified instance.""" vm_ref = vm_util.get_vm_ref(self._session, instance) pwr_state = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "runtime.powerState") # Only PoweredOn VMs can be suspended. if pwr_state == "poweredOn": LOG.debug(_("Suspending the VM"), instance=instance) suspend_task = self._session._call_method(self._session._get_vim(), "SuspendVM_Task", vm_ref) self._session._wait_for_task(suspend_task) LOG.debug(_("Suspended the VM"), instance=instance) # Raise Exception if VM is poweredOff elif pwr_state == "poweredOff": reason = _("instance is powered off and cannot be suspended.") raise exception.InstanceSuspendFailure(reason=reason) else: LOG.debug(_("VM was already in suspended state. So returning " "without doing anything"), instance=instance) def resume(self, instance): """Resume the specified instance.""" vm_ref = vm_util.get_vm_ref(self._session, instance) pwr_state = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "runtime.powerState") if pwr_state.lower() == "suspended": LOG.debug(_("Resuming the VM"), instance=instance) suspend_task = self._session._call_method( self._session._get_vim(), "PowerOnVM_Task", vm_ref) self._session._wait_for_task(suspend_task) LOG.debug(_("Resumed the VM"), instance=instance) else: reason = _("instance is not in a suspended state") raise exception.InstanceResumeFailure(reason=reason) def rescue(self, context, instance, network_info, image_meta): """Rescue the specified instance. - shutdown the instance VM. - spawn a rescue VM (the vm name-label will be instance-N-rescue). 
""" vm_ref = vm_util.get_vm_ref(self._session, instance) self.power_off(instance) r_instance = copy.deepcopy(instance) instance_name = r_instance['uuid'] + self._rescue_suffix self.spawn(context, r_instance, image_meta, None, None, network_info, instance_name=instance_name, power_on=False) # Attach vmdk to the rescue VM hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") (vmdk_path, adapter_type, disk_type) = vm_util.get_vmdk_path_and_adapter_type( hardware_devices, uuid=instance['uuid']) rescue_vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name) self._volumeops.attach_disk_to_vm( rescue_vm_ref, r_instance, adapter_type, disk_type, vmdk_path) self._power_on(instance, vm_ref=rescue_vm_ref) def unrescue(self, instance, power_on=True): """Unrescue the specified instance.""" # Get the original vmdk_path vm_ref = vm_util.get_vm_ref(self._session, instance) hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") (vmdk_path, adapter_type, disk_type) = vm_util.get_vmdk_path_and_adapter_type( hardware_devices, uuid=instance['uuid']) r_instance = copy.deepcopy(instance) instance_name = r_instance['uuid'] + self._rescue_suffix # detach the original instance disk from the rescue disk vm_rescue_ref = vm_util.get_vm_ref_from_name(self._session, instance_name) hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_rescue_ref, "VirtualMachine", "config.hardware.device") device = vm_util.get_vmdk_volume_disk(hardware_devices, path=vmdk_path) self._power_off_vm_ref(vm_rescue_ref) self._volumeops.detach_disk_from_vm(vm_rescue_ref, r_instance, device) self._destroy_instance(r_instance, None, instance_name=instance_name) if power_on: self._power_on(instance) def _power_off_vm_ref(self, vm_ref): """Power off the specifed vm. :param vm_ref: a reference object to the VM. """ poweroff_task = self._session._call_method( self._session._get_vim(), "PowerOffVM_Task", vm_ref) self._session._wait_for_task(poweroff_task) def power_off(self, instance): """Power off the specified instance. :param instance: nova.objects.instance.Instance """ vm_ref = vm_util.get_vm_ref(self._session, instance) pwr_state = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "runtime.powerState") # Only PoweredOn VMs can be powered off. if pwr_state == "poweredOn": LOG.debug(_("Powering off the VM"), instance=instance) self._power_off_vm_ref(vm_ref) LOG.debug(_("Powered off the VM"), instance=instance) # Raise Exception if VM is suspended elif pwr_state == "suspended": reason = _("instance is suspended and cannot be powered off.") raise exception.InstancePowerOffFailure(reason=reason) else: LOG.debug(_("VM was already in powered off state. So returning " "without doing anything"), instance=instance) def _power_on(self, instance, vm_ref=None): """Power on the specified instance.""" if not vm_ref: vm_ref = vm_util.get_vm_ref(self._session, instance) pwr_state = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "runtime.powerState") if pwr_state == "poweredOn": LOG.debug(_("VM was already in powered on state. So returning " "without doing anything"), instance=instance) # Only PoweredOff and Suspended VMs can be powered on. 
else: LOG.debug(_("Powering on the VM"), instance=instance) poweron_task = self._session._call_method( self._session._get_vim(), "PowerOnVM_Task", vm_ref) self._session._wait_for_task(poweron_task) LOG.debug(_("Powered on the VM"), instance=instance) def power_on(self, context, instance, network_info, block_device_info): self._power_on(instance) def _get_orig_vm_name_label(self, instance): return instance['uuid'] + '-orig' def _update_instance_progress(self, context, instance, step, total_steps): """Update instance progress percent to reflect current step number """ # Divide the action's workflow into discrete steps and "bump" the # instance's progress field as each step is completed. # # For a first cut this should be fine, however, for large VM images, # the clone disk step begins to dominate the equation. A # better approximation would use the percentage of the VM image that # has been streamed to the destination host. progress = round(float(step) / total_steps * 100) instance_uuid = instance['uuid'] LOG.debug(_("Updating instance '%(instance_uuid)s' progress to" " %(progress)d"), {'instance_uuid': instance_uuid, 'progress': progress}, instance=instance) self._virtapi.instance_update(context, instance_uuid, {'progress': progress}) def migrate_disk_and_power_off(self, context, instance, dest, flavor): """Transfers the disk of a running instance in multiple phases, turning off the instance before the end. """ # 0. Zero out the progress to begin self._update_instance_progress(context, instance, step=0, total_steps=RESIZE_TOTAL_STEPS) vm_ref = vm_util.get_vm_ref(self._session, instance) # Read the host_ref for the destination. If this is None then the # VC will decide on placement host_ref = self._get_host_ref_from_name(dest) # 1. Power off the instance self.power_off(instance) self._update_instance_progress(context, instance, step=1, total_steps=RESIZE_TOTAL_STEPS) # 2. Disassociate the linked vsphere VM from the instance vm_util.disassociate_vmref_from_instance(self._session, instance, vm_ref, suffix=self._migrate_suffix) self._update_instance_progress(context, instance, step=2, total_steps=RESIZE_TOTAL_STEPS) ds_ref = vm_util.get_datastore_ref_and_name( self._session, self._cluster, host_ref, datastore_regex=self._datastore_regex)[0] dc_info = self.get_datacenter_ref_and_name(ds_ref) # 3. Clone the VM for instance vm_util.clone_vmref_for_instance(self._session, instance, vm_ref, host_ref, ds_ref, dc_info.vmFolder) self._update_instance_progress(context, instance, step=3, total_steps=RESIZE_TOTAL_STEPS) def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM.""" # Destroy the original VM. The vm_ref needs to be searched using the # instance['uuid'] + self._migrate_suffix as the identifier. 
We will # not get the vm when searched using the instanceUuid but rather will # be found using the uuid buried in the extraConfig vm_ref = vm_util.search_vm_ref_by_identifier(self._session, instance['uuid'] + self._migrate_suffix) if vm_ref is None: LOG.debug(_("instance not present"), instance=instance) return try: LOG.debug(_("Destroying the VM"), instance=instance) destroy_task = self._session._call_method( self._session._get_vim(), "Destroy_Task", vm_ref) self._session._wait_for_task(destroy_task) LOG.debug(_("Destroyed the VM"), instance=instance) except Exception as excep: LOG.warn(_("In vmwareapi:vmops:confirm_migration, got this " "exception while destroying the VM: %s") % str(excep)) def finish_revert_migration(self, context, instance, network_info, block_device_info, power_on=True): """Finish reverting a resize.""" vm_util.associate_vmref_for_instance(self._session, instance, suffix=self._migrate_suffix) if power_on: self._power_on(instance) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, block_device_info=None, power_on=True): """Completes a resize, turning on the migrated instance.""" if resize_instance: client_factory = self._session._get_vim().client.factory vm_ref = vm_util.get_vm_ref(self._session, instance) vm_resize_spec = vm_util.get_vm_resize_spec(client_factory, instance) reconfig_task = self._session._call_method( self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=vm_resize_spec) self._session._wait_for_task(reconfig_task) # 4. Start VM if power_on: self._power_on(instance) self._update_instance_progress(context, instance, step=4, total_steps=RESIZE_TOTAL_STEPS) def live_migration(self, context, instance_ref, dest, post_method, recover_method, block_migration=False): """Spawning live_migration operation for distributing high-load.""" vm_ref = vm_util.get_vm_ref(self._session, instance_ref) host_ref = self._get_host_ref_from_name(dest) if host_ref is None: raise exception.HostNotFound(host=dest) LOG.debug(_("Migrating VM to host %s") % dest, instance=instance_ref) try: vm_migrate_task = self._session._call_method( self._session._get_vim(), "MigrateVM_Task", vm_ref, host=host_ref, priority="defaultPriority") self._session._wait_for_task(vm_migrate_task) except Exception: with excutils.save_and_reraise_exception(): recover_method(context, instance_ref, dest, block_migration) post_method(context, instance_ref, dest, block_migration) LOG.debug(_("Migrated VM to host %s") % dest, instance=instance_ref) def poll_rebooting_instances(self, timeout, instances): """Poll for rebooting instances.""" ctxt = nova_context.get_admin_context() instances_info = dict(instance_count=len(instances), timeout=timeout) if instances_info["instance_count"] > 0: LOG.info(_("Found %(instance_count)d hung reboots " "older than %(timeout)d seconds") % instances_info) for instance in instances: LOG.info(_("Automatically hard rebooting"), instance=instance) self.compute_api.reboot(ctxt, instance, "HARD") def get_info(self, instance): """Return data about the VM instance.""" vm_ref = vm_util.get_vm_ref(self._session, instance) lst_properties = ["summary.config.numCpu", "summary.config.memorySizeMB", "runtime.powerState"] vm_props = self._session._call_method(vim_util, "get_object_properties", None, vm_ref, "VirtualMachine", lst_properties) query = {'summary.config.numCpu': 0, 'summary.config.memorySizeMB': 0, 'runtime.powerState': None} self._get_values_from_object_properties(vm_props, query) max_mem = 
int(query['summary.config.memorySizeMB']) * 1024 return {'state': VMWARE_POWER_STATES[query['runtime.powerState']], 'max_mem': max_mem, 'mem': max_mem, 'num_cpu': int(query['summary.config.numCpu']), 'cpu_time': 0} def _get_diagnostic_from_object_properties(self, props, wanted_props): diagnostics = {} while props: for elem in props.objects: for prop in elem.propSet: if prop.name in wanted_props: prop_dict = vim.object_to_dict(prop.val, list_depth=1) diagnostics.update(prop_dict) token = vm_util._get_token(props) if not token: break props = self._session._call_method(vim_util, "continue_to_get_objects", token) return diagnostics def get_diagnostics(self, instance): """Return data about VM diagnostics.""" vm_ref = vm_util.get_vm_ref(self._session, instance) lst_properties = ["summary.config", "summary.quickStats", "summary.runtime"] vm_props = self._session._call_method(vim_util, "get_object_properties", None, vm_ref, "VirtualMachine", lst_properties) data = self._get_diagnostic_from_object_properties(vm_props, set(lst_properties)) # Add a namespace to all of the diagnostsics return dict([('vmware:' + k, v) for k, v in data.items()]) def get_vnc_console(self, instance): """Return connection info for a vnc console.""" vm_ref = vm_util.get_vm_ref(self._session, instance) opt_value = self._session._call_method(vim_util, 'get_dynamic_property', vm_ref, 'VirtualMachine', vm_util.VNC_CONFIG_KEY) if opt_value: port = int(opt_value.value) else: raise exception.ConsoleTypeUnavailable(console_type='vnc') return {'host': CONF.vmware.host_ip, 'port': port, 'internal_access_path': None} def get_vnc_console_vcenter(self, instance): """Return connection info for a vnc console using vCenter logic.""" # vCenter does not run virtual machines and does not run # a VNC proxy. Instead, you need to tell OpenStack to talk # directly to the ESX host running the VM you are attempting # to connect to via VNC. vnc_console = self.get_vnc_console(instance) host_name = vm_util.get_host_name_for_vm( self._session, instance) vnc_console['host'] = host_name # NOTE: VM can move hosts in some situations. Debug for admins. LOG.debug(_("VM %(uuid)s is currently on host %(host_name)s"), {'uuid': instance['name'], 'host_name': host_name}) return vnc_console @staticmethod def _get_machine_id_str(network_info): machine_id_str = '' for vif in network_info: # TODO(vish): add support for dns2 # TODO(sateesh): add support for injection of ipv6 configuration network = vif['network'] ip_v4 = netmask_v4 = gateway_v4 = broadcast_v4 = dns = None subnets_v4 = [s for s in network['subnets'] if s['version'] == 4] if len(subnets_v4) > 0: if len(subnets_v4[0]['ips']) > 0: ip_v4 = subnets_v4[0]['ips'][0] if len(subnets_v4[0]['dns']) > 0: dns = subnets_v4[0]['dns'][0]['address'] netmask_v4 = str(subnets_v4[0].as_netaddr().netmask) gateway_v4 = subnets_v4[0]['gateway']['address'] broadcast_v4 = str(subnets_v4[0].as_netaddr().broadcast) interface_str = ";".join([vif['address'], ip_v4 and ip_v4['address'] or '', netmask_v4 or '', gateway_v4 or '', broadcast_v4 or '', dns or '']) machine_id_str = machine_id_str + interface_str + '#' return machine_id_str def _set_machine_id(self, client_factory, instance, network_info): """Set the machine id of the VM for guest tools to pick up and reconfigure the network interfaces. 
""" vm_ref = vm_util.get_vm_ref(self._session, instance) machine_id_change_spec = vm_util.get_machine_id_change_spec( client_factory, self._get_machine_id_str(network_info)) LOG.debug(_("Reconfiguring VM instance to set the machine id"), instance=instance) reconfig_task = self._session._call_method(self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=machine_id_change_spec) self._session._wait_for_task(reconfig_task) LOG.debug(_("Reconfigured VM instance to set the machine id"), instance=instance) @utils.synchronized('vmware.get_and_set_vnc_port') def _get_and_set_vnc_config(self, client_factory, instance): """Set the vnc configuration of the VM.""" port = vm_util.get_vnc_port(self._session) vm_ref = vm_util.get_vm_ref(self._session, instance) vnc_config_spec = vm_util.get_vnc_config_spec( client_factory, port) LOG.debug(_("Reconfiguring VM instance to enable vnc on " "port - %(port)s") % {'port': port}, instance=instance) reconfig_task = self._session._call_method(self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=vnc_config_spec) self._session._wait_for_task(reconfig_task) LOG.debug(_("Reconfigured VM instance to enable vnc on " "port - %(port)s") % {'port': port}, instance=instance) def _get_ds_browser(self, ds_ref): ds_browser = self._datastore_browser_mapping.get(ds_ref.value) if not ds_browser: ds_browser = self._session._call_method( vim_util, "get_dynamic_property", ds_ref, "Datastore", "browser") self._datastore_browser_mapping[ds_ref.value] = ds_browser return ds_browser def get_datacenter_ref_and_name(self, ds_ref): """Get the datacenter name and the reference.""" map = self._datastore_dc_mapping.get(ds_ref.value) if not map: dc_obj = self._session._call_method(vim_util, "get_objects", "Datacenter", ["name"]) vm_util._cancel_retrieve_if_necessary(self._session, dc_obj) map = DcInfo(ref=dc_obj.objects[0].obj, name=dc_obj.objects[0].propSet[0].val, vmFolder=self._get_vmfolder_ref()) self._datastore_dc_mapping[ds_ref.value] = map return map def _get_host_ref_from_name(self, host_name): """Get reference to the host with the name specified.""" host_objs = self._session._call_method(vim_util, "get_objects", "HostSystem", ["name"]) vm_util._cancel_retrieve_if_necessary(self._session, host_objs) for host in host_objs: if hasattr(host, 'propSet'): if host.propSet[0].val == host_name: return host.obj return None def _get_vmfolder_ref(self): """Get the Vm folder ref from the datacenter.""" dc_objs = self._session._call_method(vim_util, "get_objects", "Datacenter", ["vmFolder"]) vm_util._cancel_retrieve_if_necessary(self._session, dc_objs) # There is only one default datacenter in a standalone ESX host vm_folder_ref = dc_objs.objects[0].propSet[0].val return vm_folder_ref def _create_folder_if_missing(self, ds_name, ds_ref, folder): """Create a folder if it does not exist. Currently there are two folder that are required on the datastore - base folder - the folder to store cached images - temp folder - the folder used for snapshot management and image uploading This method is aimed to be used for the management of those folders to ensure that they are created if they are missing. The ds_util method mkdir will be used to check if the folder exists. If this throws and exception 'FileAlreadyExistsException' then the folder already exists on the datastore. 
""" path = ds_util.build_datastore_path(ds_name, folder) dc_info = self.get_datacenter_ref_and_name(ds_ref) try: ds_util.mkdir(self._session, path, dc_info.ref) LOG.debug(_("Folder %s created."), path) except error_util.FileAlreadyExistsException: # NOTE(hartsocks): if the folder already exists, that # just means the folder was prepped by another process. pass def check_cache_folder(self, ds_name, ds_ref): """Check that the cache folder exists.""" self._create_folder_if_missing(ds_name, ds_ref, self._base_folder) def check_temp_folder(self, ds_name, ds_ref): """Check that the temp folder exists.""" self._create_folder_if_missing(ds_name, ds_ref, self._tmp_folder) def _check_if_folder_file_exists(self, ds_browser, ds_ref, ds_name, folder_name, file_name): # Ensure that the cache folder exists self.check_cache_folder(ds_name, ds_ref) # Check if the file exists or not. folder_path = ds_util.build_datastore_path(ds_name, folder_name) file_exists = ds_util.file_exists(self._session, ds_browser, folder_path, file_name) return file_exists def inject_network_info(self, instance, network_info): """inject network info for specified instance.""" # Set the machine.id parameter of the instance to inject # the NIC configuration inside the VM client_factory = self._session._get_vim().client.factory self._set_machine_id(client_factory, instance, network_info) def manage_image_cache(self, context, instances): if not CONF.remove_unused_base_images: LOG.debug(_("Image aging disabled. Aging will not be done.")) return datastores = vm_util.get_available_datastores(self._session, self._cluster, self._datastore_regex) datastores_info = [] for ds in datastores: ds_info = self.get_datacenter_ref_and_name(ds['ref']) datastores_info.append((ds, ds_info)) self._imagecache.update(context, instances, datastores_info) def _get_valid_vms_from_retrieve_result(self, retrieve_result): """Returns list of valid vms from RetrieveResult object.""" lst_vm_names = [] while retrieve_result: token = vm_util._get_token(retrieve_result) for vm in retrieve_result.objects: vm_name = None conn_state = None for prop in vm.propSet: if prop.name == "name": vm_name = prop.val elif prop.name == "runtime.connectionState": conn_state = prop.val # Ignoring the orphaned or inaccessible VMs if conn_state not in ["orphaned", "inaccessible"]: lst_vm_names.append(vm_name) if token: retrieve_result = self._session._call_method(vim_util, "continue_to_get_objects", token) else: break return lst_vm_names class VMwareVCVMOps(VMwareVMOps): """Management class for VM-related tasks. Contains specializations to account for differences in vSphere API behavior when invoked on Virtual Center instead of ESX host. """ def get_copy_virtual_disk_spec(self, client_factory, adapter_type, disk_type): LOG.debug(_("Will copy while retaining adapter type " "%(adapter_type)s and disk type %(disk_type)s") % {"disk_type": disk_type, "adapter_type": adapter_type}) # Passing of the destination copy spec is not supported when # VirtualDiskManager.CopyVirtualDisk is called on VC. The behavior of a # spec-less copy is to consolidate to the target disk while keeping its # disk and adapter type unchanged. 
def _update_datacenter_cache_from_objects(self, dcs): """Updates the datastore/datacenter cache.""" while dcs: token = vm_util._get_token(dcs) for dco in dcs.objects: dc_ref = dco.obj ds_refs = [] prop_dict = vm_util.propset_dict(dco.propSet) name = prop_dict.get('name') vmFolder = prop_dict.get('vmFolder') datastore_refs = prop_dict.get('datastore') if datastore_refs: datastore_refs = datastore_refs.ManagedObjectReference for ds in datastore_refs: ds_refs.append(ds.value) else: LOG.debug("Datacenter %s doesn't have any datastore " "associated with it, ignoring it", name) for ds_ref in ds_refs: self._datastore_dc_mapping[ds_ref] = DcInfo(ref=dc_ref, name=name, vmFolder=vmFolder) if token: dcs = self._session._call_method(vim_util, "continue_to_get_objects", token) else: break def get_datacenter_ref_and_name(self, ds_ref): """Get the datacenter name and the reference.""" dc_info = self._datastore_dc_mapping.get(ds_ref.value) if not dc_info: dcs = self._session._call_method(vim_util, "get_objects", "Datacenter", ["name", "datastore", "vmFolder"]) self._update_datacenter_cache_from_objects(dcs) dc_info = self._datastore_dc_mapping.get(ds_ref.value) return dc_info def list_instances(self): """Lists the VM instances that are registered with vCenter cluster.""" properties = ['name', 'runtime.connectionState'] LOG.debug(_("Getting list of instances from cluster %s"), self._cluster) vms = [] root_res_pool = self._session._call_method( vim_util, "get_dynamic_property", self._cluster, 'ClusterComputeResource', 'resourcePool') if root_res_pool: vms = self._session._call_method( vim_util, 'get_inner_objects', root_res_pool, 'vm', 'VirtualMachine', properties) lst_vm_names = self._get_valid_vms_from_retrieve_result(vms) LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names))) return lst_vm_names nova-2014.1.5/nova/virt/vmwareapi/ds_util.py0000664000567000056700000001302512540642544022015 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Datastore utility functions """ from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import vm_util LOG = logging.getLogger(__name__) def build_datastore_path(datastore_name, path): """Build the datastore compliant path.""" return "[%s] %s" % (datastore_name, path) def split_datastore_path(datastore_path): """Return the datastore and path from a datastore_path. Split the VMware style datastore path to get the Datastore name and the entity path. 
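For example (illustrative path), '[datastore1] vm_dir/vm.vmdk' is split into the tuple ('datastore1', 'vm_dir/vm.vmdk').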
""" spl = datastore_path.split('[', 1)[1].split(']', 1) path = "" if len(spl) == 1: datastore_name = spl[0] else: datastore_name, path = spl return datastore_name, path.strip() def file_delete(session, datastore_path, dc_ref): LOG.debug(_("Deleting the datastore file %s"), datastore_path) vim = session._get_vim() file_delete_task = session._call_method( session._get_vim(), "DeleteDatastoreFile_Task", vim.get_service_content().fileManager, name=datastore_path, datacenter=dc_ref) session._wait_for_task(file_delete_task) LOG.debug(_("Deleted the datastore file")) def file_move(session, dc_ref, src_file, dst_file): """Moves the source file or folder to the destination. The list of possible faults that the server can return on error include: - CannotAccessFile: Thrown if the source file or folder cannot be moved because of insufficient permissions. - FileAlreadyExists: Thrown if a file with the given name already exists at the destination. - FileFault: Thrown if there is a generic file error - FileLocked: Thrown if the source file or folder is currently locked or in use. - FileNotFound: Thrown if the file or folder specified by sourceName is not found. - InvalidDatastore: Thrown if the operation cannot be performed on the source or destination datastores. - NoDiskSpace: Thrown if there is not enough space available on the destination datastore. - RuntimeFault: Thrown if any type of runtime fault is thrown that is not covered by the other faults; for example, a communication error. """ LOG.debug(_("Moving file from %(src)s to %(dst)s."), {'src': src_file, 'dst': dst_file}) vim = session._get_vim() move_task = session._call_method( session._get_vim(), "MoveDatastoreFile_Task", vim.get_service_content().fileManager, sourceName=src_file, sourceDatacenter=dc_ref, destinationName=dst_file, destinationDatacenter=dc_ref) session._wait_for_task(move_task) LOG.debug(_("File moved")) def file_exists(session, ds_browser, ds_path, file_name): """Check if the file exists on the datastore.""" client_factory = session._get_vim().client.factory search_spec = vm_util.search_datastore_spec(client_factory, file_name) search_task = session._call_method(session._get_vim(), "SearchDatastore_Task", ds_browser, datastorePath=ds_path, searchSpec=search_spec) try: task_info = session._wait_for_task(search_task) except error_util.FileNotFoundException: return False file_exists = (getattr(task_info.result, 'file', False) and task_info.result.file[0].path == file_name) return file_exists def mkdir(session, ds_path, dc_ref): """Creates a directory at the path specified. If it is just "NAME", then a directory with this name is created at the topmost level of the DataStore. """ LOG.debug(_("Creating directory with path %s"), ds_path) session._call_method(session._get_vim(), "MakeDirectory", session._get_vim().get_service_content().fileManager, name=ds_path, datacenter=dc_ref, createParentDirectories=True) LOG.debug(_("Created directory with path %s"), ds_path) def get_sub_folders(session, ds_browser, ds_path): """Return a set of subfolders for a path on a datastore. If the path does not exist then an empty set is returned. 
""" client_factory = session._get_vim().client.factory search_task = session._call_method( session._get_vim(), "SearchDatastore_Task", ds_browser, datastorePath=ds_path) try: task_info = session._wait_for_task(search_task) except error_util.FileNotFoundException: return set() # populate the folder entries if hasattr(task_info.result, 'file'): return set([file.path for file in task_info.result.file]) return set() nova-2014.1.5/nova/virt/vmwareapi/__init__.py0000664000567000056700000000162012540642544022107 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`vmwareapi` -- Nova support for VMware ESX/vCenter through VMware API. """ # NOTE(sdague) for nicer compute_driver specification from nova.virt.vmwareapi import driver VMwareESXDriver = driver.VMwareESXDriver VMwareVCDriver = driver.VMwareVCDriver nova-2014.1.5/nova/virt/vmwareapi/vmware_images.py0000664000567000056700000002003612540642544023200 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility functions for Image transfer. """ import os from nova import exception from nova.image import glance from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import io_util from nova.virt.vmwareapi import read_write_util LOG = logging.getLogger(__name__) QUEUE_BUFFER_SIZE = 10 def start_transfer(context, read_file_handle, data_size, write_file_handle=None, image_service=None, image_id=None, image_meta=None): """Start the data transfer from the reader to the writer. Reader writes to the pipe and the writer reads from the pipe. This means that the total transfer time boils down to the slower of the read/write and not the addition of the two times. """ if not image_meta: image_meta = {} # The pipe that acts as an intermediate store of data for reader to write # to and writer to grab from. thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size) # The read thread. In case of glance it is the instance of the # GlanceFileRead class. The glance client read returns an iterator # and this class wraps that iterator to provide datachunks in calls # to read. 
read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe) # In case of Glance - VMware transfer, we just need a handle to the # HTTP Connection that is to send transfer data to the VMware datastore. if write_file_handle: write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle) # In case of VMware - Glance transfer, we relinquish VMware HTTP file read # handle to Glance Client instance, but to be sure of the transfer we need # to be sure of the status of the image on glance changing to active. # The GlanceWriteThread handles the same for us. elif image_service and image_id: write_thread = io_util.GlanceWriteThread(context, thread_safe_pipe, image_service, image_id, image_meta) # Start the read and write threads. read_event = read_thread.start() write_event = write_thread.start() try: # Wait on the read and write events to signal their end read_event.wait() write_event.wait() except Exception as exc: # In case of any of the reads or writes raising an exception, # stop the threads so that we un-necessarily don't keep the other one # waiting. read_thread.stop() write_thread.stop() # Log and raise the exception. LOG.exception(exc) raise exception.NovaException(exc) finally: # No matter what, try closing the read and write handles, if it so # applies. read_file_handle.close() if write_file_handle: write_file_handle.close() def upload_iso_to_datastore(iso_path, instance, **kwargs): LOG.debug(_("Uploading iso %s to datastore") % iso_path, instance=instance) with open(iso_path, 'r') as iso_file: write_file_handle = read_write_util.VMwareHTTPWriteFile( kwargs.get("host"), kwargs.get("data_center_name"), kwargs.get("datastore_name"), kwargs.get("cookies"), kwargs.get("file_path"), os.fstat(iso_file.fileno()).st_size) LOG.debug(_("Uploading iso of size : %s ") % os.fstat(iso_file.fileno()).st_size) block_size = 0x10000 data = iso_file.read(block_size) while len(data) > 0: write_file_handle.write(data) data = iso_file.read(block_size) write_file_handle.close() LOG.debug(_("Uploaded iso %s to datastore") % iso_path, instance=instance) def fetch_image(context, image, instance, **kwargs): """Download image from the glance image server.""" LOG.debug(_("Downloading image %s from glance image server") % image, instance=instance) (image_service, image_id) = glance.get_remote_image_service(context, image) metadata = image_service.show(context, image_id) file_size = int(metadata['size']) read_iter = image_service.download(context, image_id) read_file_handle = read_write_util.GlanceFileRead(read_iter) write_file_handle = read_write_util.VMwareHTTPWriteFile( kwargs.get("host"), kwargs.get("data_center_name"), kwargs.get("datastore_name"), kwargs.get("cookies"), kwargs.get("file_path"), file_size) start_transfer(context, read_file_handle, file_size, write_file_handle=write_file_handle) LOG.debug(_("Downloaded image %s from glance image server") % image, instance=instance) def upload_image(context, image, instance, **kwargs): """Upload the snapshotted vm disk file to Glance image server.""" LOG.debug(_("Uploading image %s to the Glance image server") % image, instance=instance) read_file_handle = read_write_util.VMwareHTTPReadFile( kwargs.get("host"), kwargs.get("data_center_name"), kwargs.get("datastore_name"), kwargs.get("cookies"), kwargs.get("file_path")) file_size = read_file_handle.get_size() (image_service, image_id) = glance.get_remote_image_service(context, image) metadata = image_service.show(context, image_id) # The properties and other fields that we need to set for the image. 
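# Note that disk_format and container_format are fixed to vmdk/bare for # VMware snapshots; the vmware_* properties echo back the kwargs supplied.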
image_metadata = {"disk_format": "vmdk", "is_public": "false", "name": metadata['name'], "status": "active", "container_format": "bare", "size": file_size, "properties": {"vmware_adaptertype": kwargs.get("adapter_type"), "vmware_disktype": kwargs.get("disk_type"), "vmware_ostype": kwargs.get("os_type"), "vmware_image_version": kwargs.get("image_version"), "owner_id": instance['project_id']}} start_transfer(context, read_file_handle, file_size, image_service=image_service, image_id=image_id, image_meta=image_metadata) LOG.debug(_("Uploaded image %s to the Glance image server") % image, instance=instance) def get_vmdk_size_and_properties(context, image, instance): """Get size of the vmdk file that is to be downloaded for attach in spawn. Need this to create the dummy virtual disk for the meta-data file. The geometry of the disk created depends on the size. """ LOG.debug(_("Getting image size for the image %s") % image, instance=instance) (image_service, image_id) = glance.get_remote_image_service(context, image) meta_data = image_service.show(context, image_id) size, properties = meta_data["size"], meta_data["properties"] LOG.debug(_("Got image size of %(size)s for the image %(image)s"), {'size': size, 'image': image}, instance=instance) return size, properties nova-2014.1.5/nova/virt/vmwareapi/io_util.py0000664000567000056700000001434112540642544022020 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 VMware, Inc. # Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility classes for defining the time saving transfer of data from the reader to the write using a LightQueue as a Pipe between the reader and the writer. """ from eventlet import event from eventlet import greenthread from eventlet import queue from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) IO_THREAD_SLEEP_TIME = .01 GLANCE_POLL_INTERVAL = 5 class ThreadSafePipe(queue.LightQueue): """The pipe to hold the data which the reader writes to and the writer reads from. """ def __init__(self, maxsize, transfer_size): queue.LightQueue.__init__(self, maxsize) self.transfer_size = transfer_size self.transferred = 0 def read(self, chunk_size): """Read data from the pipe. Chunksize if ignored for we have ensured that the data chunks written to the pipe by readers is the same as the chunks asked for by the Writer. 
""" if self.transferred < self.transfer_size: data_item = self.get() self.transferred += len(data_item) return data_item else: return "" def write(self, data): """Put a data item in the pipe.""" self.put(data) def seek(self, offset, whence=0): """Set the file's current position at the offset.""" pass def tell(self): """Get size of the file to be read.""" return self.transfer_size def close(self): """A place-holder to maintain consistency.""" pass class GlanceWriteThread(object): """Ensures that image data is written to in the glance client and that it is in correct ('active')state. """ def __init__(self, context, input, image_service, image_id, image_meta=None): if not image_meta: image_meta = {} self.context = context self.input = input self.image_service = image_service self.image_id = image_id self.image_meta = image_meta self._running = False def start(self): self.done = event.Event() def _inner(): """Function to do the image data transfer through an update and thereon checks if the state is 'active'. """ try: self.image_service.update(self.context, self.image_id, self.image_meta, data=self.input) self._running = True except exception.ImageNotAuthorized as exc: self.done.send_exception(exc) while self._running: try: image_meta = self.image_service.show(self.context, self.image_id) image_status = image_meta.get("status") if image_status == "active": self.stop() self.done.send(True) # If the state is killed, then raise an exception. elif image_status == "killed": self.stop() msg = (_("Glance image %s is in killed state") % self.image_id) LOG.error(msg) self.done.send_exception(exception.NovaException(msg)) elif image_status in ["saving", "queued"]: greenthread.sleep(GLANCE_POLL_INTERVAL) else: self.stop() msg = _("Glance image " "%(image_id)s is in unknown state " "- %(state)s") % { "image_id": self.image_id, "state": image_status} LOG.error(msg) self.done.send_exception(exception.NovaException(msg)) except Exception as exc: self.stop() self.done.send_exception(exc) greenthread.spawn(_inner) return self.done def stop(self): self._running = False def wait(self): return self.done.wait() def close(self): pass class IOThread(object): """Class that reads chunks from the input file and writes them to the output file till the transfer is completely done. """ def __init__(self, input, output): self.input = input self.output = output self._running = False self.got_exception = False def start(self): self.done = event.Event() def _inner(): """Read data from the input and write the same to the output until the transfer completes. """ self._running = True while self._running: try: data = self.input.read(None) if not data: self.stop() self.done.send(True) self.output.write(data) greenthread.sleep(IO_THREAD_SLEEP_TIME) except Exception as exc: self.stop() LOG.exception(exc) self.done.send_exception(exc) greenthread.spawn(_inner) return self.done def stop(self): self._running = False def wait(self): return self.done.wait() nova-2014.1.5/nova/virt/vmwareapi/volumeops.py0000664000567000056700000005337112540642544022413 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for Storage-related functions (attach, detach, etc). """ from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import vim from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util from nova.virt.vmwareapi import volume_util CONF = cfg.CONF LOG = logging.getLogger(__name__) class VMwareVolumeOps(object): """Management class for Volume-related tasks.""" def __init__(self, session, cluster=None, vc_support=False): self._session = session self._cluster = cluster self._vc_support = vc_support def attach_disk_to_vm(self, vm_ref, instance, adapter_type, disk_type, vmdk_path=None, disk_size=None, linked_clone=False, device_name=None): """Attach disk to VM by reconfiguration.""" instance_name = instance['name'] client_factory = self._session._get_vim().client.factory devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") (controller_key, unit_number, controller_spec) = vm_util.allocate_controller_key_and_unit_number( client_factory, devices, adapter_type) vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec( client_factory, disk_type, vmdk_path, disk_size, linked_clone, controller_key, unit_number, device_name) if controller_spec: vmdk_attach_config_spec.deviceChange.append(controller_spec) LOG.debug(_("Reconfiguring VM instance %(instance_name)s to attach " "disk %(vmdk_path)s or device %(device_name)s with type " "%(disk_type)s"), {'instance_name': instance_name, 'vmdk_path': vmdk_path, 'device_name': device_name, 'disk_type': disk_type}, instance=instance) reconfig_task = self._session._call_method( self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=vmdk_attach_config_spec) self._session._wait_for_task(reconfig_task) LOG.debug(_("Reconfigured VM instance %(instance_name)s to attach " "disk %(vmdk_path)s or device %(device_name)s with type " "%(disk_type)s"), {'instance_name': instance_name, 'vmdk_path': vmdk_path, 'device_name': device_name, 'disk_type': disk_type}, instance=instance) def _update_volume_details(self, vm_ref, instance, volume_uuid): # Store the uuid of the volume_device hw_devices = self._session._call_method(vim_util, 'get_dynamic_property', vm_ref, 'VirtualMachine', 'config.hardware.device') device_uuid = vm_util.get_vmdk_backed_disk_uuid(hw_devices, volume_uuid) volume_option = 'volume-%s' % volume_uuid extra_opts = {volume_option: device_uuid} client_factory = self._session._get_vim().client.factory extra_config_specs = vm_util.get_vm_extra_config_spec( client_factory, extra_opts) reconfig_task = self._session._call_method( self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=extra_config_specs) self._session._wait_for_task(reconfig_task) def _get_volume_uuid(self, vm_ref, volume_uuid): prop = 'config.extraConfig["volume-%s"]' % volume_uuid opt_val = self._session._call_method(vim_util, 'get_dynamic_property', vm_ref, 'VirtualMachine', prop) if opt_val is not None: return opt_val.value def detach_disk_from_vm(self, vm_ref, 
instance, device, destroy_disk=False): """Detach disk from VM by reconfiguration.""" instance_name = instance['name'] client_factory = self._session._get_vim().client.factory vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec( client_factory, device, destroy_disk) disk_key = device.key LOG.debug(_("Reconfiguring VM instance %(instance_name)s to detach " "disk %(disk_key)s"), {'instance_name': instance_name, 'disk_key': disk_key}, instance=instance) reconfig_task = self._session._call_method( self._session._get_vim(), "ReconfigVM_Task", vm_ref, spec=vmdk_detach_config_spec) self._session._wait_for_task(reconfig_task) LOG.debug(_("Reconfigured VM instance %(instance_name)s to detach " "disk %(disk_key)s"), {'instance_name': instance_name, 'disk_key': disk_key}, instance=instance) def discover_st(self, data): """Discover iSCSI targets.""" target_portal = data['target_portal'] target_iqn = data['target_iqn'] LOG.debug(_("Discovering iSCSI target %(target_iqn)s from " "%(target_portal)s."), {'target_iqn': target_iqn, 'target_portal': target_portal}) device_name, uuid = volume_util.find_st(self._session, data, self._cluster) if device_name: LOG.debug(_("Storage target found. No need to discover")) return (device_name, uuid) # Rescan iSCSI HBA with iscsi target host volume_util.rescan_iscsi_hba(self._session, self._cluster, target_portal) # Find iSCSI Target again device_name, uuid = volume_util.find_st(self._session, data, self._cluster) if device_name: LOG.debug(_("Discovered iSCSI target %(target_iqn)s from " "%(target_portal)s."), {'target_iqn': target_iqn, 'target_portal': target_portal}) else: LOG.debug(_("Unable to discovered iSCSI target %(target_iqn)s " "from %(target_portal)s."), {'target_iqn': target_iqn, 'target_portal': target_portal}) return (device_name, uuid) def get_volume_connector(self, instance): """Return volume connector information.""" try: vm_ref = vm_util.get_vm_ref(self._session, instance) except exception.InstanceNotFound: vm_ref = None iqn = volume_util.get_host_iqn(self._session, self._cluster) connector = {'ip': CONF.vmware.host_ip, 'initiator': iqn, 'host': CONF.vmware.host_ip} if vm_ref: connector['instance'] = vm_ref.value return connector def _get_volume_ref(self, volume_ref_name): """Get the volume moref from the ref name.""" return vim.get_moref(volume_ref_name, 'VirtualMachine') def _get_vmdk_base_volume_device(self, volume_ref): # Get the vmdk file name that the VM is pointing to hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", volume_ref, "VirtualMachine", "config.hardware.device") return vm_util.get_vmdk_volume_disk(hardware_devices) def _attach_volume_vmdk(self, connection_info, instance, mountpoint): """Attach vmdk volume storage to VM instance.""" instance_name = instance['name'] vm_ref = vm_util.get_vm_ref(self._session, instance) data = connection_info['data'] # Get volume details from volume ref volume_ref = self._get_volume_ref(data['volume']) volume_device = self._get_vmdk_base_volume_device(volume_ref) volume_vmdk_path = volume_device.backing.fileName # Get details required for adding disk device such as # adapter_type, disk_type hw_devices = self._session._call_method(vim_util, 'get_dynamic_property', vm_ref, 'VirtualMachine', 'config.hardware.device') (vmdk_file_path, adapter_type, disk_type) = vm_util.get_vmdk_path_and_adapter_type(hw_devices) # Attach the disk to virtual machine instance self.attach_disk_to_vm(vm_ref, instance, adapter_type, disk_type, vmdk_path=volume_vmdk_path) # Store the uuid of the 
volume_device self._update_volume_details(vm_ref, instance, data['volume_id']) LOG.info(_("Mountpoint %(mountpoint)s attached to " "instance %(instance_name)s"), {'mountpoint': mountpoint, 'instance_name': instance_name}, instance=instance) def _attach_volume_iscsi(self, connection_info, instance, mountpoint): """Attach iscsi volume storage to VM instance.""" instance_name = instance['name'] vm_ref = vm_util.get_vm_ref(self._session, instance) # Attach Volume to VM LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s, " "%(mountpoint)s"), {'connection_info': connection_info, 'instance_name': instance_name, 'mountpoint': mountpoint}, instance=instance) data = connection_info['data'] # Discover iSCSI Target device_name, uuid = self.discover_st(data) if device_name is None: raise volume_util.StorageError(_("Unable to find iSCSI Target")) # Get the vmdk file name that the VM is pointing to hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") (vmdk_file_path, adapter_type, disk_type) = vm_util.get_vmdk_path_and_adapter_type(hardware_devices) self.attach_disk_to_vm(vm_ref, instance, adapter_type, 'rdmp', device_name=device_name) LOG.info(_("Mountpoint %(mountpoint)s attached to " "instance %(instance_name)s"), {'mountpoint': mountpoint, 'instance_name': instance_name}, instance=instance) def attach_volume(self, connection_info, instance, mountpoint): """Attach volume storage to VM instance.""" driver_type = connection_info['driver_volume_type'] LOG.debug(_("Volume attach. Driver type: %s"), driver_type, instance=instance) if driver_type == 'vmdk': self._attach_volume_vmdk(connection_info, instance, mountpoint) elif driver_type == 'iscsi': self._attach_volume_iscsi(connection_info, instance, mountpoint) else: raise exception.VolumeDriverNotFound(driver_type=driver_type) def _relocate_vmdk_volume(self, volume_ref, res_pool, datastore): """Relocate the volume. The move type will be moveAllDiskBackingsAndAllowSharing. """ client_factory = self._session._get_vim().client.factory spec = vm_util.relocate_vm_spec(client_factory, datastore=datastore) spec.pool = res_pool task = self._session._call_method(self._session._get_vim(), "RelocateVM_Task", volume_ref, spec=spec) self._session._wait_for_task(task) def _get_res_pool_of_vm(self, vm_ref): """Get resource pool to which the VM belongs.""" # Get the host, the VM belongs to host = self._session._call_method(vim_util, 'get_dynamic_property', vm_ref, 'VirtualMachine', 'runtime').host # Get the compute resource, the host belongs to compute_res = self._session._call_method(vim_util, 'get_dynamic_property', host, 'HostSystem', 'parent') # Get resource pool from the compute resource return self._session._call_method(vim_util, 'get_dynamic_property', compute_res, compute_res._type, 'resourcePool') def _consolidate_vmdk_volume(self, instance, vm_ref, device, volume_ref): """Consolidate volume backing VMDK files if needed. The volume's VMDK file attached to an instance can be moved by SDRS if enabled on the cluster. By this the VMDK files can get copied onto another datastore and the copy on this new location will be the latest version of the VMDK file. So at the time of detach, we need to consolidate the current backing VMDK file with the VMDK file in the new location. We need to ensure that the VMDK chain (snapshots) remains intact during the consolidation. 
SDRS retains the chain when it copies VMDK files over, so for consolidation we relocate the backing with move option as moveAllDiskBackingsAndAllowSharing and then delete the older version of the VMDK file attaching the new version VMDK file. In the case of a volume boot the we need to ensure that the volume is on the datastore of the instance. """ # Consolidation only supported with VC driver if not self._vc_support: return original_device = self._get_vmdk_base_volume_device(volume_ref) original_device_path = original_device.backing.fileName current_device_path = device.backing.fileName if original_device_path == current_device_path: # The volume is not moved from its original location. # No consolidation is required. LOG.debug(_("The volume has not been displaced from " "its original location: %s. No consolidation " "needed."), current_device_path) return # The volume has been moved from its original location. # Need to consolidate the VMDK files. LOG.info(_("The volume's backing has been relocated to %s. Need to " "consolidate backing disk file."), current_device_path) # Pick the resource pool on which the instance resides. # Move the volume to the datastore where the new VMDK file is present. res_pool = self._get_res_pool_of_vm(vm_ref) datastore = device.backing.datastore self._relocate_vmdk_volume(volume_ref, res_pool, datastore) # Delete the original disk from the volume_ref self.detach_disk_from_vm(volume_ref, instance, original_device, destroy_disk=True) # Attach the current disk to the volume_ref # Get details required for adding disk device such as # adapter_type, disk_type hw_devices = self._session._call_method(vim_util, 'get_dynamic_property', volume_ref, 'VirtualMachine', 'config.hardware.device') (vmdk_file_path, adapter_type, disk_type) = vm_util.get_vmdk_path_and_adapter_type(hw_devices) # Attach the current volume to the volume_ref self.attach_disk_to_vm(volume_ref, instance, adapter_type, disk_type, vmdk_path=current_device_path) def _get_vmdk_backed_disk_device(self, vm_ref, connection_info_data): # Get the vmdk file name that the VM is pointing to hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") # Get disk uuid disk_uuid = self._get_volume_uuid(vm_ref, connection_info_data['volume_id']) device = vm_util.get_vmdk_backed_disk_device(hardware_devices, disk_uuid) if not device: raise volume_util.StorageError(_("Unable to find volume")) return device def _detach_volume_vmdk(self, connection_info, instance, mountpoint): """Detach volume storage to VM instance.""" instance_name = instance['name'] vm_ref = vm_util.get_vm_ref(self._session, instance) # Detach Volume from VM LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s"), {'mountpoint': mountpoint, 'instance_name': instance_name}, instance=instance) data = connection_info['data'] device = self._get_vmdk_backed_disk_device(vm_ref, data) # Get the volume ref volume_ref = self._get_volume_ref(data['volume']) self._consolidate_vmdk_volume(instance, vm_ref, device, volume_ref) self.detach_disk_from_vm(vm_ref, instance, device) LOG.info(_("Mountpoint %(mountpoint)s detached from " "instance %(instance_name)s"), {'mountpoint': mountpoint, 'instance_name': instance_name}, instance=instance) def _detach_volume_iscsi(self, connection_info, instance, mountpoint): """Detach volume storage to VM instance.""" instance_name = instance['name'] vm_ref = vm_util.get_vm_ref(self._session, instance) # Detach Volume from VM LOG.debug(_("Detach_volume: 
%(instance_name)s, %(mountpoint)s"), {'mountpoint': mountpoint, 'instance_name': instance_name}, instance=instance) data = connection_info['data'] # Discover iSCSI Target device_name, uuid = volume_util.find_st(self._session, data, self._cluster) if device_name is None: raise volume_util.StorageError(_("Unable to find iSCSI Target")) # Get the vmdk file name that the VM is pointing to hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") device = vm_util.get_rdm_disk(hardware_devices, uuid) if device is None: raise volume_util.StorageError(_("Unable to find volume")) self.detach_disk_from_vm(vm_ref, instance, device, destroy_disk=True) LOG.info(_("Mountpoint %(mountpoint)s detached from " "instance %(instance_name)s"), {'mountpoint': mountpoint, 'instance_name': instance_name}, instance=instance) def detach_volume(self, connection_info, instance, mountpoint): """Detach volume storage to VM instance.""" driver_type = connection_info['driver_volume_type'] LOG.debug(_("Volume detach. Driver type: %s"), driver_type, instance=instance) if driver_type == 'vmdk': self._detach_volume_vmdk(connection_info, instance, mountpoint) elif driver_type == 'iscsi': self._detach_volume_iscsi(connection_info, instance, mountpoint) else: raise exception.VolumeDriverNotFound(driver_type=driver_type) def attach_root_volume(self, connection_info, instance, mountpoint, datastore): """Attach a root volume to the VM instance.""" driver_type = connection_info['driver_volume_type'] LOG.debug(_("Root volume attach. Driver type: %s"), driver_type, instance=instance) if self._vc_support and driver_type == 'vmdk': vm_ref = vm_util.get_vm_ref(self._session, instance) data = connection_info['data'] # Get the volume ref volume_ref = self._get_volume_ref(data['volume']) # Pick the resource pool on which the instance resides. Move the # volume to the datastore of the instance. res_pool = self._get_res_pool_of_vm(vm_ref) self._relocate_vmdk_volume(volume_ref, res_pool, datastore) self.attach_volume(connection_info, instance, mountpoint) nova-2014.1.5/nova/virt/vmwareapi/error_util.py0000664000567000056700000001626312540642544022547 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exception classes and SOAP response error checking module. 
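For example, get_fault_class('FileNotFound') (defined below) returns the FileNotFoundException class, while an unregistered fault name falls back to the generic VMwareDriverException.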
""" from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) ALREADY_EXISTS = 'AlreadyExists' CANNOT_DELETE_FILE = 'CannotDeleteFile' FILE_ALREADY_EXISTS = 'FileAlreadyExists' FILE_FAULT = 'FileFault' FILE_LOCKED = 'FileLocked' FILE_NOT_FOUND = 'FileNotFound' INVALID_PROPERTY = 'InvalidProperty' NO_PERMISSION = 'NoPermission' NOT_AUTHENTICATED = 'NotAuthenticated' class VimException(Exception): """The VIM Exception class.""" def __init__(self, exception_summary, excep): Exception.__init__(self) if isinstance(exception_summary, list): # we need this to protect against developers using # this method like VimFaultException raise ValueError("exception_summary must not be a list") self.exception_summary = str(exception_summary) self.exception_obj = excep def __str__(self): return self.exception_summary + ": " + str(self.exception_obj) class SessionOverLoadException(VimException): """Session Overload Exception.""" pass class SessionConnectionException(VimException): """Session Connection Exception.""" pass class VimAttributeError(VimException): """VI Attribute Error.""" pass class VimFaultException(Exception): """The VIM Fault exception class.""" def __init__(self, fault_list, fault_string, details=None): Exception.__init__(self) if not isinstance(fault_list, list): raise ValueError("fault_list must be a list") self.fault_list = fault_list self.fault_string = fault_string self.details = details def __str__(self): if self.details: return '%s %s' % (self.fault_string, self.details) return self.fault_string class FaultCheckers(object): """Methods for fault checking of SOAP response. Per Method error handlers for which we desire error checking are defined. SOAP faults are embedded in the SOAP messages as properties and not as SOAP faults. """ @staticmethod def retrievepropertiesex_fault_checker(resp_obj): """Checks the RetrievePropertiesEx response for errors. Certain faults are sent as part of the SOAP body as property of missingSet. For example NotAuthenticated fault. """ fault_list = [] details = {} if not resp_obj: # This is the case when the session has timed out. ESX SOAP server # sends an empty RetrievePropertiesResponse. Normally missingSet in # the returnval field has the specifics about the error, but that's # not the case with a timed out idle session. It is as bad as a # terminated session for we cannot use the session. So setting # fault to NotAuthenticated fault. fault_list = [NOT_AUTHENTICATED] else: for obj_cont in resp_obj.objects: if hasattr(obj_cont, "missingSet"): for missing_elem in obj_cont.missingSet: fault_type = missing_elem.fault.fault # Fault needs to be added to the type of fault for # uniformity in error checking as SOAP faults define fault_list.append(fault_type.__class__.__name__) if fault_type.__class__.__name__ == NO_PERMISSION: details['object'] = fault_type.object.value details['privilegeId'] = fault_type.privilegeId if fault_list: exc_msg_list = ', '.join(fault_list) fault_string = _("Error(s) %s occurred in the call to " "RetrievePropertiesEx") % exc_msg_list raise VimFaultException(fault_list, fault_string, details) class VMwareDriverException(exception.NovaException): """Base class for all exceptions raised by the VMware Driver. All exceptions raised by the VMwareAPI drivers should raise an exception descended from this class as a root. 
This will allow the driver to potentially trap problems related to its own internal configuration before halting the nova-compute node. """ msg_fmt = _("VMware Driver fault.") class VMwareDriverConfigurationException(VMwareDriverException): """Base class for all configuration exceptions. """ msg_fmt = _("VMware Driver configuration fault.") class UseLinkedCloneConfigurationFault(VMwareDriverConfigurationException): msg_fmt = _("No default value for use_linked_clone found.") class MissingParameter(VMwareDriverException): msg_fmt = _("Missing parameter : %(param)s") class NoRootDiskDefined(VMwareDriverException): msg_fmt = _("No root disk defined.") class AlreadyExistsException(VMwareDriverException): msg_fmt = _("Resource already exists.") code = 409 class CannotDeleteFileException(VMwareDriverException): msg_fmt = _("Cannot delete file.") code = 403 class FileAlreadyExistsException(VMwareDriverException): msg_fmt = _("File already exists.") code = 409 class FileFaultException(VMwareDriverException): msg_fmt = _("File fault.") code = 409 class FileLockedException(VMwareDriverException): msg_fmt = _("File locked.") code = 403 class FileNotFoundException(VMwareDriverException): msg_fmt = _("File not found.") code = 404 class InvalidPropertyException(VMwareDriverException): msg_fmt = _("Invalid property.") code = 400 class NoPermissionException(VMwareDriverException): msg_fmt = _("No Permission.") code = 403 class NotAuthenticatedException(VMwareDriverException): msg_fmt = _("Not Authenticated.") code = 403 # Populate the fault registry with the exceptions that have # special treatment. _fault_classes_registry = { ALREADY_EXISTS: AlreadyExistsException, CANNOT_DELETE_FILE: CannotDeleteFileException, FILE_ALREADY_EXISTS: FileAlreadyExistsException, FILE_FAULT: FileFaultException, FILE_LOCKED: FileLockedException, FILE_NOT_FOUND: FileNotFoundException, INVALID_PROPERTY: InvalidPropertyException, NO_PERMISSION: NoPermissionException, NOT_AUTHENTICATED: NotAuthenticatedException } def get_fault_class(name): """Get a named subclass of VMwareDriverException.""" name = str(name) fault_class = _fault_classes_registry.get(name) if not fault_class: LOG.warning(_('Fault %s not matched.'), name) fault_class = VMwareDriverException return fault_class nova-2014.1.5/nova/virt/vmwareapi/host.py0000664000567000056700000001747712540642544021346 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for host-related functions (start, reboot, etc). 
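host_power_action below accepts the actions 'reboot', 'shutdown' and 'startup', which map to the RebootHost_Task, ShutdownHost_Task and PowerUpHostFromStandBy_Task vSphere calls respectively.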
""" from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from nova import utils from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util LOG = logging.getLogger(__name__) class Host(object): """Implements host related operations.""" def __init__(self, session): self._session = session def host_power_action(self, host, action): """Reboots or shuts down the host.""" host_mor = vm_util.get_host_ref(self._session) LOG.debug(_("%(action)s %(host)s"), {'action': action, 'host': host}) if action == "reboot": host_task = self._session._call_method( self._session._get_vim(), "RebootHost_Task", host_mor, force=False) elif action == "shutdown": host_task = self._session._call_method( self._session._get_vim(), "ShutdownHost_Task", host_mor, force=False) elif action == "startup": host_task = self._session._call_method( self._session._get_vim(), "PowerUpHostFromStandBy_Task", host_mor, timeoutSec=60) self._session._wait_for_task(host_task) def host_maintenance_mode(self, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. """ host_mor = vm_util.get_host_ref(self._session) LOG.debug(_("Set maintenance mod on %(host)s to %(mode)s"), {'host': host, 'mode': mode}) if mode: host_task = self._session._call_method( self._session._get_vim(), "EnterMaintenanceMode_Task", host_mor, timeout=0, evacuatePoweredOffVms=True) else: host_task = self._session._call_method( self._session._get_vim(), "ExitMaintenanceMode_Task", host_mor, timeout=0) self._session._wait_for_task(host_task) def set_host_enabled(self, _host, enabled): """Sets the specified host's ability to accept new instances.""" pass class HostState(object): """Manages information about the ESX host this compute node is running on. """ def __init__(self, session, host_name): super(HostState, self).__init__() self._session = session self._host_name = host_name self._stats = {} self.update_status() def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run the update first. """ if refresh or not self._stats: self.update_status() return self._stats def update_status(self): """Update the current state of the host. 
""" host_mor = vm_util.get_host_ref(self._session) summary = self._session._call_method(vim_util, "get_dynamic_property", host_mor, "HostSystem", "summary") if summary is None: return try: ds = vm_util.get_datastore_ref_and_name(self._session) except exception.DatastoreNotFound: ds = (None, None, 0, 0) data = {} data["vcpus"] = summary.hardware.numCpuThreads data["cpu_info"] = \ {"vendor": summary.hardware.vendor, "model": summary.hardware.cpuModel, "topology": {"cores": summary.hardware.numCpuCores, "sockets": summary.hardware.numCpuPkgs, "threads": summary.hardware.numCpuThreads} } data["disk_total"] = ds[2] / units.Gi data["disk_available"] = ds[3] / units.Gi data["disk_used"] = data["disk_total"] - data["disk_available"] data["host_memory_total"] = summary.hardware.memorySize / units.Mi data["host_memory_free"] = data["host_memory_total"] - \ summary.quickStats.overallMemoryUsage data["hypervisor_type"] = summary.config.product.name data["hypervisor_version"] = utils.convert_version_to_int( str(summary.config.product.version)) data["hypervisor_hostname"] = self._host_name data["supported_instances"] = [('i686', 'vmware', 'hvm'), ('x86_64', 'vmware', 'hvm')] self._stats = data return data class VCState(object): """Manages information about the VC host this compute node is running on. """ def __init__(self, session, host_name, cluster): super(VCState, self).__init__() self._session = session self._host_name = host_name self._cluster = cluster self._stats = {} self.update_status() def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run the update first. """ if refresh or not self._stats: self.update_status() return self._stats def update_status(self): """Update the current state of the cluster.""" # Get the datastore in the cluster try: ds = vm_util.get_datastore_ref_and_name(self._session, self._cluster) except exception.DatastoreNotFound: ds = (None, None, 0, 0) # Get cpu, memory stats from the cluster stats = vm_util.get_stats_from_cluster(self._session, self._cluster) about_info = self._session._call_method(vim_util, "get_about_info") data = {} data["vcpus"] = stats['cpu']['vcpus'] data["cpu_info"] = {"vendor": stats['cpu']['vendor'], "model": stats['cpu']['model'], "topology": {"cores": stats['cpu']['cores'], "threads": stats['cpu']['vcpus']}} data["disk_total"] = ds[2] / units.Gi data["disk_available"] = ds[3] / units.Gi data["disk_used"] = data["disk_total"] - data["disk_available"] data["host_memory_total"] = stats['mem']['total'] data["host_memory_free"] = stats['mem']['free'] data["hypervisor_type"] = about_info.name data["hypervisor_version"] = utils.convert_version_to_int( str(about_info.version)) data["hypervisor_hostname"] = self._host_name data["supported_instances"] = [('i686', 'vmware', 'hvm'), ('x86_64', 'vmware', 'hvm')] self._stats = data return data nova-2014.1.5/nova/virt/vmwareapi/vim_util.py0000664000567000056700000003131612540642544022205 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ The VMware API utility module. """ from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging vmware_opts = cfg.IntOpt('maximum_objects', default=100, help='The maximum number of ObjectContent data ' 'objects that should be returned in a single ' 'result. A positive value will cause the ' 'operation to suspend the retrieval when the ' 'count of objects reaches the specified ' 'maximum. The server may still limit the count ' 'to something less than the configured value. ' 'Any remaining objects may be retrieved with ' 'additional requests.') CONF = cfg.CONF CONF.register_opt(vmware_opts, 'vmware') LOG = logging.getLogger(__name__) def build_selection_spec(client_factory, name): """Builds the selection spec.""" sel_spec = client_factory.create('ns0:SelectionSpec') sel_spec.name = name return sel_spec def build_traversal_spec(client_factory, name, spec_type, path, skip, select_set): """Builds the traversal spec object.""" traversal_spec = client_factory.create('ns0:TraversalSpec') traversal_spec.name = name traversal_spec.type = spec_type traversal_spec.path = path traversal_spec.skip = skip traversal_spec.selectSet = select_set return traversal_spec def build_recursive_traversal_spec(client_factory): """Builds the Recursive Traversal Spec to traverse the object managed object hierarchy. """ visit_folders_select_spec = build_selection_spec(client_factory, "visitFolders") # For getting to hostFolder from datacenter dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter", "hostFolder", False, [visit_folders_select_spec]) # For getting to vmFolder from datacenter dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter", "vmFolder", False, [visit_folders_select_spec]) # For getting Host System to virtual machine h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem", "vm", False, [visit_folders_select_spec]) # For getting to Host System from Compute Resource cr_to_h = build_traversal_spec(client_factory, "cr_to_h", "ComputeResource", "host", False, []) # For getting to datastore from Compute Resource cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds", "ComputeResource", "datastore", False, []) rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp") rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm") # For getting to resource pool from Compute Resource cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp", "ComputeResource", "resourcePool", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # For getting to child res pool from the parent res pool rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool", "resourcePool", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # For getting to Virtual Machine from the Resource Pool rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool", "vm", False, [rp_to_rp_select_spec, rp_to_vm_select_spec]) # Get the assorted traversal spec which takes care of the objects to # be searched for from the root folder traversal_spec = build_traversal_spec(client_factory, "visitFolders", "Folder", "childEntity", False, [visit_folders_select_spec, dc_to_hf, dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp, rp_to_rp, h_to_vm, rp_to_vm]) return traversal_spec def build_property_spec(client_factory, type="VirtualMachine", properties_to_collect=None, all_properties=False): 
"""Builds the Property Spec.""" if not properties_to_collect: properties_to_collect = ["name"] property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = all_properties property_spec.pathSet = properties_to_collect property_spec.type = type return property_spec def build_object_spec(client_factory, root_folder, traversal_specs): """Builds the object Spec.""" object_spec = client_factory.create('ns0:ObjectSpec') object_spec.obj = root_folder object_spec.skip = False object_spec.selectSet = traversal_specs return object_spec def build_property_filter_spec(client_factory, property_specs, object_specs): """Builds the Property Filter Spec.""" property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_filter_spec.propSet = property_specs property_filter_spec.objectSet = object_specs return property_filter_spec def get_object_properties(vim, collector, mobj, type, properties): """Gets the properties of the Managed object specified.""" client_factory = vim.client.factory if mobj is None: return None usecoll = collector if usecoll is None: usecoll = vim.get_service_content().propertyCollector property_filter_spec = client_factory.create('ns0:PropertyFilterSpec') property_spec = client_factory.create('ns0:PropertySpec') property_spec.all = (properties is None or len(properties) == 0) property_spec.pathSet = properties property_spec.type = type object_spec = client_factory.create('ns0:ObjectSpec') object_spec.obj = mobj object_spec.skip = False property_filter_spec.propSet = [property_spec] property_filter_spec.objectSet = [object_spec] options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx(usecoll, specSet=[property_filter_spec], options=options) def get_dynamic_property(vim, mobj, type, property_name): """Gets a particular property of the Managed Object.""" property_dict = get_dynamic_properties(vim, mobj, type, [property_name]) return property_dict.get(property_name) def get_dynamic_properties(vim, mobj, type, property_names): """Gets the specified properties of the Managed Object.""" obj_content = get_object_properties(vim, None, mobj, type, property_names) if obj_content is None: return {} if hasattr(obj_content, 'token'): cancel_retrieve(vim, obj_content.token) property_dict = {} if obj_content.objects: if hasattr(obj_content.objects[0], 'propSet'): dynamic_properties = obj_content.objects[0].propSet if dynamic_properties: for prop in dynamic_properties: property_dict[prop.name] = prop.val # The object may have information useful for logging if hasattr(obj_content.objects[0], 'missingSet'): for m in obj_content.objects[0].missingSet: LOG.warning(_("Unable to retrieve value for %(path)s " "Reason: %(reason)s"), {'path': m.path, 'reason': m.fault.localizedMessage}) return property_dict def get_objects(vim, type, properties_to_collect=None, all=False): """Gets the list of objects of the type specified.""" if not properties_to_collect: properties_to_collect = ["name"] client_factory = vim.client.factory object_spec = build_object_spec(client_factory, vim.get_service_content().rootFolder, [build_recursive_traversal_spec(client_factory)]) property_spec = build_property_spec(client_factory, type=type, properties_to_collect=properties_to_collect, all_properties=all) property_filter_spec = build_property_filter_spec(client_factory, [property_spec], [object_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return 
vim.RetrievePropertiesEx( vim.get_service_content().propertyCollector, specSet=[property_filter_spec], options=options) def get_inner_objects(vim, base_obj, path, inner_type, properties_to_collect=None, all=False): """Gets the list of inner objects of the type specified.""" client_factory = vim.client.factory base_type = base_obj._type traversal_spec = build_traversal_spec(client_factory, 'inner', base_type, path, False, []) object_spec = build_object_spec(client_factory, base_obj, [traversal_spec]) property_spec = build_property_spec(client_factory, type=inner_type, properties_to_collect=properties_to_collect, all_properties=all) property_filter_spec = build_property_filter_spec(client_factory, [property_spec], [object_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx( vim.get_service_content().propertyCollector, specSet=[property_filter_spec], options=options) def cancel_retrieve(vim, token): """Cancels the retrieve operation.""" return vim.CancelRetrievePropertiesEx( vim.get_service_content().propertyCollector, token=token) def continue_to_get_objects(vim, token): """Continues to get the list of objects of the type specified.""" return vim.ContinueRetrievePropertiesEx( vim.get_service_content().propertyCollector, token=token) def get_prop_spec(client_factory, spec_type, properties): """Builds the Property Spec Object.""" prop_spec = client_factory.create('ns0:PropertySpec') prop_spec.type = spec_type prop_spec.pathSet = properties return prop_spec def get_obj_spec(client_factory, obj, select_set=None): """Builds the Object Spec object.""" obj_spec = client_factory.create('ns0:ObjectSpec') obj_spec.obj = obj obj_spec.skip = False if select_set is not None: obj_spec.selectSet = select_set return obj_spec def get_prop_filter_spec(client_factory, obj_spec, prop_spec): """Builds the Property Filter Spec Object.""" prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec') prop_filter_spec.propSet = prop_spec prop_filter_spec.objectSet = obj_spec return prop_filter_spec def get_properties_for_a_collection_of_objects(vim, type, obj_list, properties): """Gets the list of properties for the collection of objects of the type specified. """ client_factory = vim.client.factory if len(obj_list) == 0: return [] prop_spec = get_prop_spec(client_factory, type, properties) lst_obj_specs = [] for obj in obj_list: lst_obj_specs.append(get_obj_spec(client_factory, obj)) prop_filter_spec = get_prop_filter_spec(client_factory, lst_obj_specs, [prop_spec]) options = client_factory.create('ns0:RetrieveOptions') options.maxObjects = CONF.vmware.maximum_objects return vim.RetrievePropertiesEx( vim.get_service_content().propertyCollector, specSet=[prop_filter_spec], options=options) def get_about_info(vim): """Get the About Info from the service content.""" return vim.get_service_content().about nova-2014.1.5/nova/virt/vmwareapi/vif.py0000664000567000056700000001464312540642544021145 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """VIF drivers for VMware.""" from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import error_util from nova.virt.vmwareapi import network_util from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util LOG = logging.getLogger(__name__) CONF = cfg.CONF vmwareapi_vif_opts = [ cfg.StrOpt('vlan_interface', default='vmnic0', help='Physical ethernet adapter name for vlan networking'), ] CONF.register_opts(vmwareapi_vif_opts, 'vmware') def _get_associated_vswitch_for_interface(session, interface, cluster=None): # Check if the physical network adapter exists on the host. if not network_util.check_if_vlan_interface_exists(session, interface, cluster): raise exception.NetworkAdapterNotFound(adapter=interface) # Get the vSwitch associated with the Physical Adapter vswitch_associated = network_util.get_vswitch_for_vlan_interface( session, interface, cluster) if not vswitch_associated: raise exception.SwitchNotFoundForNetworkAdapter(adapter=interface) return vswitch_associated def ensure_vlan_bridge(session, vif, cluster=None, create_vlan=True): """Create a vlan and bridge unless they already exist.""" vlan_num = vif['network'].get_meta('vlan') bridge = vif['network']['bridge'] vlan_interface = CONF.vmware.vlan_interface network_ref = network_util.get_network_with_the_name(session, bridge, cluster) if network_ref and network_ref['type'] == 'DistributedVirtualPortgroup': return network_ref if not network_ref: # Create a port group on the vSwitch associated with the physical # network adapter corresponding to vlan_interface on the ESX # host.
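            # NOTE: create_port_group() below is passed vlan id 0 when
            # create_vlan is False; an ESX port group with vlan id 0
            # performs no vlan tagging, so the same call covers both the
            # tagged and untagged cases.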
vswitch_associated = _get_associated_vswitch_for_interface(session, vlan_interface, cluster) network_util.create_port_group(session, bridge, vswitch_associated, vlan_num if create_vlan else 0, cluster) network_ref = network_util.get_network_with_the_name(session, bridge, cluster) elif create_vlan: # Get the vSwitch associated with the Physical Adapter vswitch_associated = _get_associated_vswitch_for_interface(session, vlan_interface, cluster) # Get the vlan id and vswitch corresponding to the port group _get_pg_info = network_util.get_vlanid_and_vswitch_for_portgroup pg_vlanid, pg_vswitch = _get_pg_info(session, bridge, cluster) # Check if the vswitch associated is proper if pg_vswitch != vswitch_associated: raise exception.InvalidVLANPortGroup( bridge=bridge, expected=vswitch_associated, actual=pg_vswitch) # Check if the vlan id is proper for the port group if pg_vlanid != vlan_num: raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, pgroup=pg_vlanid) return network_ref def _is_valid_opaque_network_id(opaque_id, bridge_id, integration_bridge, num_networks): return (opaque_id == bridge_id or (num_networks == 1 and opaque_id == integration_bridge)) def _get_network_ref_from_opaque(opaque_networks, integration_bridge, bridge): num_networks = len(opaque_networks) for network in opaque_networks: if _is_valid_opaque_network_id(network['opaqueNetworkId'], bridge, integration_bridge, num_networks): return {'type': 'OpaqueNetwork', 'network-id': network['opaqueNetworkId'], 'network-name': network['opaqueNetworkName'], 'network-type': network['opaqueNetworkType']} LOG.warning(_("No valid network found in %(opaque)s, from %(bridge)s " "or %(integration_bridge)s"), {'opaque': opaque_networks, 'bridge': bridge, 'integration_bridge': integration_bridge}) def get_neutron_network(session, network_name, cluster, vif): host = vm_util.get_host_ref(session, cluster) try: opaque = session._call_method(vim_util, "get_dynamic_property", host, "HostSystem", "config.network.opaqueNetwork") except error_util.InvalidPropertyException: opaque = None if opaque: bridge = vif['network']['id'] opaque_networks = opaque.HostOpaqueNetworkInfo network_ref = _get_network_ref_from_opaque(opaque_networks, CONF.vmware.integration_bridge, bridge) else: bridge = network_name network_ref = network_util.get_network_with_the_name( session, network_name, cluster) if not network_ref: raise exception.NetworkNotFoundForBridge(bridge=bridge) return network_ref def get_network_ref(session, cluster, vif, is_neutron): if is_neutron: network_name = (vif['network']['bridge'] or CONF.vmware.integration_bridge) network_ref = get_neutron_network(session, network_name, cluster, vif) else: create_vlan = vif['network'].get_meta('should_create_vlan', False) network_ref = ensure_vlan_bridge(session, vif, cluster=cluster, create_vlan=create_vlan) return network_ref nova-2014.1.5/nova/virt/vmwareapi/volume_util.py0000664000567000056700000001467212540642544022727 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Helper methods for operations related to the management of volumes, and storage repositories """ from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.vmwareapi import vim_util from nova.virt.vmwareapi import vm_util LOG = logging.getLogger(__name__) class StorageError(Exception): """To raise errors related to Volume commands.""" def __init__(self, message=None): super(StorageError, self).__init__(message) def get_host_iqn(session, cluster=None): """Return the host iSCSI IQN.""" host_mor = vm_util.get_host_ref(session, cluster) hbas_ret = session._call_method(vim_util, "get_dynamic_property", host_mor, "HostSystem", "config.storageDevice.hostBusAdapter") # Meaning there are no host bus adapters on the host if hbas_ret is None: return host_hbas = hbas_ret.HostHostBusAdapter if not host_hbas: return for hba in host_hbas: if hba.__class__.__name__ == 'HostInternetScsiHba': return hba.iScsiName def find_st(session, data, cluster=None): """Return the iSCSI Target given a volume info.""" target_portal = data['target_portal'] target_iqn = data['target_iqn'] host_mor = vm_util.get_host_ref(session, cluster) lst_properties = ["config.storageDevice.hostBusAdapter", "config.storageDevice.scsiTopology", "config.storageDevice.scsiLun"] prop_dict = session._call_method(vim_util, "get_dynamic_properties", host_mor, "HostSystem", lst_properties) result = (None, None) hbas_ret = None scsi_topology = None scsi_lun_ret = None if prop_dict: hbas_ret = prop_dict.get('config.storageDevice.hostBusAdapter') scsi_topology = prop_dict.get('config.storageDevice.scsiTopology') scsi_lun_ret = prop_dict.get('config.storageDevice.scsiLun') # Meaning there are no host bus adapters on the host if hbas_ret is None: return result host_hbas = hbas_ret.HostHostBusAdapter if not host_hbas: return result for hba in host_hbas: if hba.__class__.__name__ == 'HostInternetScsiHba': hba_key = hba.key break else: return result if scsi_topology is None: return result host_adapters = scsi_topology.adapter if not host_adapters: return result scsi_lun_key = None for adapter in host_adapters: if adapter.adapter == hba_key: if not getattr(adapter, 'target', None): return result for target in adapter.target: if (getattr(target.transport, 'address', None) and target.transport.address[0] == target_portal and target.transport.iScsiName == target_iqn): if not target.lun: return result for lun in target.lun: if 'host.ScsiDisk' in lun.scsiLun: scsi_lun_key = lun.scsiLun break break break if scsi_lun_key is None: return result if scsi_lun_ret is None: return result host_scsi_luns = scsi_lun_ret.ScsiLun if not host_scsi_luns: return result for scsi_lun in host_scsi_luns: if scsi_lun.key == scsi_lun_key: return (scsi_lun.deviceName, scsi_lun.uuid) return result def rescan_iscsi_hba(session, cluster=None, target_portal=None): """Rescan the iSCSI HBA to discover iSCSI targets.""" host_mor = vm_util.get_host_ref(session, cluster) storage_system_mor = session._call_method(vim_util, "get_dynamic_property", host_mor, "HostSystem", "configManager.storageSystem") hbas_ret = session._call_method(vim_util, "get_dynamic_property", storage_system_mor, "HostStorageSystem", "storageDeviceInfo.hostBusAdapter") # Meaning there are no host bus adapters on the host if hbas_ret is None: return host_hbas = hbas_ret.HostHostBusAdapter if not host_hbas: return for hba in host_hbas: if hba.__class__.__name__ 
== 'HostInternetScsiHba': hba_device = hba.device if target_portal: # Check if iscsi host is already in the send target host list send_targets = getattr(hba, 'configuredSendTarget', []) send_tgt_portals = ['%s:%s' % (s.address, s.port) for s in send_targets] if target_portal not in send_tgt_portals: _add_iscsi_send_target_host(session, storage_system_mor, hba_device, target_portal) break else: return LOG.debug(_("Rescanning HBA %s") % hba_device) session._call_method(session._get_vim(), "RescanHba", storage_system_mor, hbaDevice=hba_device) LOG.debug(_("Rescanned HBA %s ") % hba_device) def _add_iscsi_send_target_host(session, storage_system_mor, hba_device, target_portal): """Adds the iscsi host to send target host list.""" client_factory = session._get_vim().client.factory send_tgt = client_factory.create('ns0:HostInternetScsiHbaSendTarget') (send_tgt.address, send_tgt.port) = target_portal.split(':') LOG.debug(_("Adding iSCSI host %s to send targets"), send_tgt.address) session._call_method( session._get_vim(), "AddInternetScsiSendTargets", storage_system_mor, iScsiHbaDevice=hba_device, targets=[send_tgt]) nova-2014.1.5/nova/virt/configdrive.py0000664000567000056700000001456012540642544020663 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Config Drive v2 helper.""" import os import shutil import tempfile from oslo.config import cfg from nova import exception from nova.openstack.common import fileutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from nova import utils from nova import version LOG = logging.getLogger(__name__) configdrive_opts = [ cfg.StrOpt('config_drive_format', default='iso9660', help='Config drive format. One of iso9660 (default) or vfat'), cfg.StrOpt('config_drive_tempdir', default=tempfile.tempdir, help=('Where to put temporary files associated with ' 'config drive creation')), # force_config_drive is a string option, to allow for future behaviors # (e.g. 
use config_drive based on image properties) cfg.StrOpt('force_config_drive', help='Set to force injection to take place on a config drive ' '(if set, valid options are: always)'), cfg.StrOpt('mkisofs_cmd', default='genisoimage', help='Name and optionally path of the tool used for ' 'ISO image creation') ] CONF = cfg.CONF CONF.register_opts(configdrive_opts) # Config drives are 64 MiB if we can't size to the exact size of the data CONFIGDRIVESIZE_BYTES = 64 * units.Mi class ConfigDriveBuilder(object): """Build config drives, optionally as a context manager.""" def __init__(self, instance_md=None): self.imagefile = None # TODO(mikal): I don't think I can use utils.tempdir here, because # I need to have the directory last longer than the scope of this # method call self.tempdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir, prefix='cd_gen_') if instance_md is not None: self.add_instance_metadata(instance_md) def __enter__(self): return self def __exit__(self, exctype, excval, exctb): if exctype is not None: # NOTE(mikal): this means we're being cleaned up because an # exception was thrown. All bets are off now, and we should not # swallow the exception return False self.cleanup() def _add_file(self, path, data): filepath = os.path.join(self.tempdir, path) dirname = os.path.dirname(filepath) fileutils.ensure_tree(dirname) with open(filepath, 'wb') as f: f.write(data) def add_instance_metadata(self, instance_md): for (path, value) in instance_md.metadata_for_config_drive(): self._add_file(path, value) LOG.debug(_('Added %(filepath)s to config drive'), {'filepath': path}) def _make_iso9660(self, path): publisher = "%(product)s %(version)s" % { 'product': version.product_string(), 'version': version.version_string_with_package() } utils.execute(CONF.mkisofs_cmd, '-o', path, '-ldots', '-allow-lowercase', '-allow-multidot', '-l', '-publisher', publisher, '-quiet', '-J', '-r', '-V', 'config-2', self.tempdir, attempts=1, run_as_root=False) def _make_vfat(self, path): # NOTE(mikal): This is a little horrible, but I couldn't find an # equivalent to genisoimage for vfat filesystems. with open(path, 'wb') as f: f.truncate(CONFIGDRIVESIZE_BYTES) utils.mkfs('vfat', path, label='config-2') mounted = False try: mountdir = tempfile.mkdtemp(dir=CONF.config_drive_tempdir, prefix='cd_mnt_') _out, err = utils.trycmd('mount', '-o', 'loop,uid=%d,gid=%d' % (os.getuid(), os.getgid()), path, mountdir, run_as_root=True) if err: raise exception.ConfigDriveMountFailed(operation='mount', error=err) mounted = True # NOTE(mikal): I can't just use shutil.copytree here, because the # destination directory already exists. This is annoying. for ent in os.listdir(self.tempdir): shutil.copytree(os.path.join(self.tempdir, ent), os.path.join(mountdir, ent)) finally: if mounted: utils.execute('umount', mountdir, run_as_root=True) shutil.rmtree(mountdir) def make_drive(self, path): """Make the config drive. :param path: the path to place the config drive image at :raises: ProcessExecutionError if a helper process has failed.
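        A minimal usage sketch (illustrative only; `md` stands in for an
        instance metadata object exposing metadata_for_config_drive()):

            with ConfigDriveBuilder(instance_md=md) as cb:
                cb.make_drive('/tmp/disk.config')

        On a normal exit from the context manager the temporary build
        directory is removed by cleanup().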
""" if CONF.config_drive_format == 'iso9660': self._make_iso9660(path) elif CONF.config_drive_format == 'vfat': self._make_vfat(path) else: raise exception.ConfigDriveUnknownFormat( format=CONF.config_drive_format) def cleanup(self): if self.imagefile: fileutils.delete_if_exists(self.imagefile) try: shutil.rmtree(self.tempdir) except OSError as e: LOG.error(_('Could not remove tmpdir: %s'), str(e)) def required_by(instance): return instance.get('config_drive') or CONF.force_config_drive nova-2014.1.5/nova/virt/virtapi.py0000664000567000056700000000350112540642544020033 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib class VirtAPI(object): def instance_update(self, context, instance_uuid, updates): """Perform an instance update operation on behalf of a virt driver :param context: security context :param instance_uuid: uuid of the instance to be updated :param updates: dict of attribute=value pairs to change Returns: orig_instance, new_instance """ raise NotImplementedError() def provider_fw_rule_get_all(self, context): """Get the provider firewall rules :param context: security context """ raise NotImplementedError() def agent_build_get_by_triple(self, context, hypervisor, os, architecture): """Get information about the available agent builds for a given hypervisor, os, and architecture :param context: security context :param hypervisor: agent hypervisor type :param os: agent operating system type :param architecture: agent architecture """ raise NotImplementedError() @contextlib.contextmanager def wait_for_instance_event(self, instance, event_names, deadline=300, error_callback=None): raise NotImplementedError() nova-2014.1.5/nova/virt/watchdog_actions.py0000664000567000056700000000163312540642532021676 0ustar jenkinsjenkins00000000000000# Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Describes and verifies the watchdog device actions.""" # the values which may be passed to libvirt RAW_WATCHDOG_ACTIONS = ['poweroff', 'reset', 'pause', 'none'] def is_valid_watchdog_action(val): """Check if the given value is a valid watchdog device parameter.""" return val in RAW_WATCHDOG_ACTIONS nova-2014.1.5/nova/virt/disk/0000775000567000056700000000000012540643452016735 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/disk/mount/0000775000567000056700000000000012540643452020077 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/disk/mount/nbd.py0000664000567000056700000001112712540642544021217 0ustar jenkinsjenkins00000000000000# Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Support for mounting images with qemu-nbd.""" import os import random import re import time from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.disk.mount import api LOG = logging.getLogger(__name__) nbd_opts = [ cfg.IntOpt('timeout_nbd', default=10, help='Amount of time, in seconds, to wait for NBD ' 'device start up.'), ] CONF = cfg.CONF CONF.register_opts(nbd_opts) NBD_DEVICE_RE = re.compile('nbd[0-9]+') class NbdMount(api.Mount): """qemu-nbd support disk images.""" mode = 'nbd' def _detect_nbd_devices(self): """Detect nbd device files.""" return filter(NBD_DEVICE_RE.match, os.listdir('/sys/block/')) def _find_unused(self, devices): for device in devices: if not os.path.exists(os.path.join('/sys/block/', device, 'pid')): if not os.path.exists('/var/lock/qemu-nbd-%s' % device): return device else: LOG.error(_('NBD error - previous umount did not cleanup ' '/var/lock/qemu-nbd-%s.'), device) LOG.warn(_('No free nbd devices')) return None def _allocate_nbd(self): if not os.path.exists('/sys/block/nbd0'): LOG.error(_('nbd module not loaded')) self.error = _('nbd unavailable: module not loaded') return None devices = self._detect_nbd_devices() random.shuffle(devices) device = self._find_unused(devices) if not device: # really want to log this info, not raise self.error = _('No free nbd devices') return None return os.path.join('/dev', device) @utils.synchronized('nbd-allocation-lock') def _inner_get_dev(self): device = self._allocate_nbd() if not device: return False # NOTE(mikal): qemu-nbd will return an error if the device file is # already in use. 
LOG.debug(_('Get nbd device %(dev)s for %(imgfile)s'), {'dev': device, 'imgfile': self.image}) _out, err = utils.trycmd('qemu-nbd', '-c', device, self.image, run_as_root=True) if err: self.error = _('qemu-nbd error: %s') % err LOG.info(_('NBD mount error: %s'), self.error) return False # NOTE(vish): this forks into another process, so give it a chance # to set up before continuing pidfile = "/sys/block/%s/pid" % os.path.basename(device) for _i in range(CONF.timeout_nbd): if os.path.exists(pidfile): self.device = device break time.sleep(1) else: self.error = _('nbd device %s did not show up') % device LOG.info(_('NBD mount error: %s'), self.error) # Cleanup _out, err = utils.trycmd('qemu-nbd', '-d', device, run_as_root=True) if err: LOG.warn(_('Detaching from erroneous nbd device returned ' 'error: %s'), err) return False self.error = '' self.linked = True return True def get_dev(self): """Retry requests for NBD devices.""" return self._get_dev_retry_helper() def unget_dev(self): if not self.linked: return LOG.debug(_('Release nbd device %s'), self.device) utils.execute('qemu-nbd', '-d', self.device, run_as_root=True) self.linked = False self.device = None def flush_dev(self): """flush NBD block device buffer.""" # Perform an explicit BLKFLSBUF to support older qemu-nbd(s). # Without this flush, when a nbd device gets re-used the # qemu-nbd intermittently hangs. if self.device: utils.execute('blockdev', '--flushbufs', self.device, run_as_root=True) nova-2014.1.5/nova/virt/disk/mount/loop.py0000664000567000056700000000433512540642544021430 0ustar jenkinsjenkins00000000000000# Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Support for mounting images with the loop device.""" from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.disk.mount import api LOG = logging.getLogger(__name__) class LoopMount(api.Mount): """loop back support for raw images.""" mode = 'loop' def _inner_get_dev(self): out, err = utils.trycmd('losetup', '--find', '--show', self.image, run_as_root=True) if err: self.error = _('Could not attach image to loopback: %s') % err LOG.info(_('Loop mount error: %s'), self.error) self.linked = False self.device = None return False self.device = out.strip() LOG.debug(_("Got loop device %s"), self.device) self.linked = True return True def get_dev(self): # NOTE(mikal): the retry is required here in case we are low on loop # devices. Note however that modern kernels will use more loop devices # if they exist. If you're seeing lots of retries, consider adding # more devices. 
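        # One way to grow the pool on older kernels is to reload the
        # loop module with a larger limit, e.g. `modprobe loop
        # max_loop=64` (illustrative command; newer kernels create loop
        # devices on demand).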
return self._get_dev_retry_helper() def unget_dev(self): if not self.linked: return # NOTE(mikal): On some kernels, losetup -d will intermittently fail, # thus leaking a loop device unless the losetup --detach is retried: # https://lkml.org/lkml/2012/9/28/62 LOG.debug(_("Release loop device %s"), self.device) utils.execute('losetup', '--detach', self.device, run_as_root=True, attempts=3) self.linked = False self.device = None nova-2014.1.5/nova/virt/disk/mount/__init__.py0000664000567000056700000000117612540642532022213 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Support for mounting disk images on the host filesystem """ nova-2014.1.5/nova/virt/disk/mount/api.py0000664000567000056700000002135512540642544021231 0ustar jenkinsjenkins00000000000000# Copyright 2011 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Support for mounting virtual image files.""" import os import time from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import utils LOG = logging.getLogger(__name__) MAX_DEVICE_WAIT = 30 class Mount(object): """Standard mounting operations, that can be overridden by subclasses. The basic device operations provided are get, map and mount, to be called in that order. 
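    An illustrative sketch of that ordering (error handling omitted;
    paths are hypothetical):

        mnt = Mount.instance_for_format('/tmp/img.raw', '/mnt/x',
                                        None, 'raw')
        if mnt.get_dev() and mnt.map_dev() and mnt.mnt_dev():
            pass  # work with the filesystem under mnt.mount_dir
        mnt.do_teardown()

    which is what the do_mount() and do_teardown() helpers below wrap.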
""" mode = None # to be overridden in subclasses @staticmethod def instance_for_format(imgfile, mountdir, partition, imgfmt): LOG.debug(_("Instance for format imgfile=%(imgfile)s " "mountdir=%(mountdir)s partition=%(partition)s " "imgfmt=%(imgfmt)s"), {'imgfile': imgfile, 'mountdir': mountdir, 'partition': partition, 'imgfmt': imgfmt}) if imgfmt == "raw": LOG.debug(_("Using LoopMount")) return importutils.import_object( "nova.virt.disk.mount.loop.LoopMount", imgfile, mountdir, partition) else: LOG.debug(_("Using NbdMount")) return importutils.import_object( "nova.virt.disk.mount.nbd.NbdMount", imgfile, mountdir, partition) @staticmethod def instance_for_device(imgfile, mountdir, partition, device): LOG.debug(_("Instance for device imgfile=%(imgfile)s " "mountdir=%(mountdir)s partition=%(partition)s " "device=%(device)s"), {'imgfile': imgfile, 'mountdir': mountdir, 'partition': partition, 'device': device}) if "loop" in device: LOG.debug(_("Using LoopMount")) return importutils.import_object( "nova.virt.disk.mount.loop.LoopMount", imgfile, mountdir, partition, device) else: LOG.debug(_("Using NbdMount")) return importutils.import_object( "nova.virt.disk.mount.nbd.NbdMount", imgfile, mountdir, partition, device) def __init__(self, image, mount_dir, partition=None, device=None): # Input self.image = image self.partition = partition self.mount_dir = mount_dir # Output self.error = "" # Internal self.linked = self.mapped = self.mounted = self.automapped = False self.device = self.mapped_device = device # Reset to mounted dir if possible self.reset_dev() def reset_dev(self): """Reset device paths to allow unmounting.""" if not self.device: return self.linked = self.mapped = self.mounted = True device = self.device if os.path.isabs(device) and os.path.exists(device): if device.startswith('/dev/mapper/'): device = os.path.basename(device) device, self.partition = device.rsplit('p', 1) self.device = os.path.join('/dev', device) def get_dev(self): """Make the image available as a block device in the file system.""" self.device = None self.linked = True return True def _get_dev_retry_helper(self): """Some implementations need to retry their get_dev.""" # NOTE(mikal): This method helps implement retries. The implementation # simply calls _get_dev_retry_helper from their get_dev, and implements # _inner_get_dev with their device acquisition logic. The NBD # implementation has an example. start_time = time.time() device = self._inner_get_dev() while not device: LOG.info(_('Device allocation failed. 
Will retry in 2 seconds.')) time.sleep(2) if time.time() - start_time > MAX_DEVICE_WAIT: LOG.warn(_('Device allocation failed after repeated retries.')) return False device = self._inner_get_dev() return True def _inner_get_dev(self): raise NotImplementedError() def unget_dev(self): """Release the block device from the file system namespace.""" self.linked = False def map_dev(self): """Map partitions of the device to the file system namespace.""" assert(os.path.exists(self.device)) LOG.debug(_("Map dev %s"), self.device) automapped_path = '/dev/%sp%s' % (os.path.basename(self.device), self.partition) if self.partition == -1: self.error = _('partition search unsupported with %s') % self.mode elif self.partition and not os.path.exists(automapped_path): map_path = '/dev/mapper/%sp%s' % (os.path.basename(self.device), self.partition) assert(not os.path.exists(map_path)) # Note kpartx can output warnings to stderr and succeed # Also it can output failures to stderr and "succeed" # So we just go on the existence of the mapped device _out, err = utils.trycmd('kpartx', '-a', self.device, run_as_root=True, discard_warnings=True) # Note kpartx does nothing when presented with a raw image, # so given we only use it when we expect a partitioned image, fail if not os.path.exists(map_path): if not err: err = _('partition %s not found') % self.partition self.error = _('Failed to map partitions: %s') % err else: self.mapped_device = map_path self.mapped = True elif self.partition and os.path.exists(automapped_path): # Note auto mapping can be enabled with the 'max_part' option # to the nbd or loop kernel modules. Beware of possible races # in the partition scanning for _loop_ devices though # (details in bug 1024586), which are currently uncatered for. self.mapped_device = automapped_path self.mapped = True self.automapped = True else: self.mapped_device = self.device self.mapped = True return self.mapped def unmap_dev(self): """Remove partitions of the device from the file system namespace.""" if not self.mapped: return LOG.debug(_("Unmap dev %s"), self.device) if self.partition and not self.automapped: utils.execute('kpartx', '-d', self.device, run_as_root=True) self.mapped = False self.automapped = False def mnt_dev(self): """Mount the device into the file system.""" LOG.debug(_("Mount %(dev)s on %(dir)s") % {'dev': self.mapped_device, 'dir': self.mount_dir}) _out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir, discard_warnings=True, run_as_root=True) if err: self.error = _('Failed to mount filesystem: %s') % err LOG.debug(self.error) return False self.mounted = True return True def unmnt_dev(self): """Unmount the device from the file system.""" if not self.mounted: return self.flush_dev() LOG.debug(_("Umount %s") % self.mapped_device) utils.execute('umount', self.mapped_device, run_as_root=True) self.mounted = False def flush_dev(self): pass def do_mount(self): """Call the get, map and mnt operations.""" status = False try: status = self.get_dev() and self.map_dev() and self.mnt_dev() finally: if not status: LOG.debug(_("Fail to mount, tearing back down")) self.do_teardown() return status def do_umount(self): """Call the unmnt operation.""" if self.mounted: self.unmnt_dev() def do_teardown(self): """Call the umnt, unmap, and unget operations.""" if self.mounted: self.unmnt_dev() if self.mapped: self.unmap_dev() if self.linked: self.unget_dev() nova-2014.1.5/nova/virt/disk/__init__.py0000664000567000056700000000123212540642532021042 0ustar jenkinsjenkins00000000000000# Copyright 2011 
Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Operations on disk images including: resize, file system creation, data injection. """ nova-2014.1.5/nova/virt/disk/api.py0000664000567000056700000005523212540642544020070 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # # Copyright 2011, Piston Cloud Computing, Inc. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility methods to resize, repartition, and modify disk images. Includes injection of SSH public keys into the authorized_keys file. """ import os import random import tempfile if os.name != 'nt': import crypt from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import paths from nova import utils from nova.virt.disk.mount import api as mount from nova.virt.disk.vfs import api as vfs from nova.virt import images LOG = logging.getLogger(__name__) disk_opts = [ cfg.StrOpt('injected_network_template', default=paths.basedir_def('nova/virt/interfaces.template'), help='Template file for injected network'), # NOTE(yamahata): ListOpt won't work because the command may include a # comma. For example: # # mkfs.ext3 -O dir_index,extent -E stride=8,stripe-width=16 # --label %(fs_label)s %(target)s # # list arguments are comma separated and there is no way to # escape such commas. # cfg.MultiStrOpt('virt_mkfs', default=[], help='Name of the mkfs commands for ephemeral device. ' 'The format is <os_type>=<mkfs command>'), cfg.BoolOpt('resize_fs_using_block_device', default=False, help='Attempt to resize the filesystem by accessing the ' 'image over a block device. This is done by the host ' 'and may not be necessary if the image contains a recent ' 'version of cloud-init. Possible mechanisms require ' 'the nbd driver (for qcow and raw), or loop (for raw).'), ] CONF = cfg.CONF CONF.register_opts(disk_opts) CONF.import_opt('default_ephemeral_format', 'nova.virt.driver') _MKFS_COMMAND = {} _DEFAULT_MKFS_COMMAND = None _DEFAULT_FS_BY_OSTYPE = {'linux': 'ext3', 'windows': 'ntfs'} for s in CONF.virt_mkfs: # NOTE(yamahata): mkfs command may include '=' for its options.
# So item.partition('=') doesn't work here os_type, mkfs_command = s.split('=', 1) if os_type: _MKFS_COMMAND[os_type] = mkfs_command if os_type == 'default': _DEFAULT_MKFS_COMMAND = mkfs_command def get_fs_type_for_os_type(os_type): return os_type if _MKFS_COMMAND.get(os_type) else 'default' def mkfs(os_type, fs_label, target, run_as_root=True): """Format a file or block device using a user provided command for each os type. If user has not provided any configuration, format type will be used according to a default_ephemeral_format configuration or a system defaults. """ mkfs_command = (_MKFS_COMMAND.get(os_type, _DEFAULT_MKFS_COMMAND) or '') % {'fs_label': fs_label, 'target': target} if mkfs_command: utils.execute(*mkfs_command.split(), run_as_root=run_as_root) else: default_fs = CONF.default_ephemeral_format if not default_fs: default_fs = _DEFAULT_FS_BY_OSTYPE.get(os_type, 'ext3') utils.mkfs(default_fs, target, fs_label, run_as_root=run_as_root) def resize2fs(image, check_exit_code=False, run_as_root=False): try: utils.execute('e2fsck', '-fp', image, check_exit_code=[0, 1, 2], run_as_root=run_as_root) except processutils.ProcessExecutionError as exc: LOG.debug(_("Checking the file system with e2fsck has failed, " "the resize will be aborted. (%s)"), exc) else: utils.execute('resize2fs', image, check_exit_code=check_exit_code, run_as_root=run_as_root) def get_disk_size(path): """Get the (virtual) size of a disk image :param path: Path to the disk image :returns: Size (in bytes) of the given disk image as it would be seen by a virtual machine. """ return images.qemu_img_info(path).virtual_size def extend(image, size, use_cow=False): """Increase image to size.""" if not can_resize_image(image, size): return utils.execute('qemu-img', 'resize', image, size) # if we can't access the filesystem, we can't do anything more if not is_image_partitionless(image, use_cow): return def safe_resize2fs(dev, run_as_root=False, finally_call=lambda: None): try: resize2fs(dev, run_as_root=run_as_root, check_exit_code=[0]) except processutils.ProcessExecutionError as exc: LOG.debug(_("Resizing the file system with resize2fs " "has failed with error: %s"), exc) finally: finally_call() # NOTE(vish): attempts to resize filesystem if use_cow: if CONF.resize_fs_using_block_device: # in case of non-raw disks we can't just resize the image, but # rather the mounted device instead mounter = mount.Mount.instance_for_format( image, None, None, 'qcow2') if mounter.get_dev(): safe_resize2fs(mounter.device, run_as_root=True, finally_call=mounter.unget_dev) else: safe_resize2fs(image) def can_resize_image(image, size): """Check whether we can resize the container image file.""" LOG.debug(_('Checking if we can resize image %(image)s. ' 'size=%(size)s'), {'image': image, 'size': size}) # Check that we're increasing the size virt_size = get_disk_size(image) if virt_size >= size: LOG.debug(_('Cannot resize image %s to a smaller size.'), image) return False return True def is_image_partitionless(image, use_cow=False): """Check whether we can resize contained file system.""" LOG.debug(_('Checking if we can resize filesystem inside %(image)s. ' 'CoW=%(use_cow)s'), {'image': image, 'use_cow': use_cow}) # Check the image is unpartitioned if use_cow: try: fs = vfs.VFS.instance_for_image(image, 'qcow2', None) fs.setup() fs.teardown() except exception.NovaException as e: LOG.debug(_('Unable to mount image %(image)s with ' 'error %(error)s. 
Cannot resize.'), {'image': image, 'error': e}) return False else: # For raw, we can directly inspect the file system try: utils.execute('e2label', image) except processutils.ProcessExecutionError as e: LOG.debug(_('Unable to determine label for image %(image)s with ' 'error %(error)s. Cannot resize.'), {'image': image, 'error': e}) return False return True class _DiskImage(object): """Provide operations on a disk image file.""" tmp_prefix = 'openstack-disk-mount-tmp' def __init__(self, image, partition=None, use_cow=False, mount_dir=None): # These passed to each mounter self.image = image self.partition = partition self.mount_dir = mount_dir self.use_cow = use_cow # Internal self._mkdir = False self._mounter = None self._errors = [] if mount_dir: device = self._device_for_path(mount_dir) if device: self._reset(device) @staticmethod def _device_for_path(path): device = None path = os.path.realpath(path) with open("/proc/mounts", 'r') as ifp: for line in ifp: fields = line.split() if fields[1] == path: device = fields[0] break return device def _reset(self, device): """Reset internal state for a previously mounted directory.""" self._mounter = mount.Mount.instance_for_device(self.image, self.mount_dir, self.partition, device) mount_name = os.path.basename(self.mount_dir or '') self._mkdir = mount_name.startswith(self.tmp_prefix) @property def errors(self): """Return the collated errors from all operations.""" return '\n--\n'.join([''] + self._errors) def mount(self): """Mount a disk image, using the object attributes. The first supported means provided by the mount classes is used. True, or False is returned and the 'errors' attribute contains any diagnostics. """ if self._mounter: raise exception.NovaException(_('image already mounted')) if not self.mount_dir: self.mount_dir = tempfile.mkdtemp(prefix=self.tmp_prefix) self._mkdir = True imgfmt = "raw" if self.use_cow: imgfmt = "qcow2" mounter = mount.Mount.instance_for_format(self.image, self.mount_dir, self.partition, imgfmt) if mounter.do_mount(): self._mounter = mounter return self._mounter.device else: LOG.debug(mounter.error) self._errors.append(mounter.error) return None def umount(self): """Umount a mount point from the filesystem.""" if self._mounter: self._mounter.do_umount() self._mounter = None def teardown(self): """Remove a disk image from the file system.""" try: if self._mounter: self._mounter.do_teardown() self._mounter = None finally: if self._mkdir: os.rmdir(self.mount_dir) # Public module functions def inject_data(image, key=None, net=None, metadata=None, admin_password=None, files=None, partition=None, use_cow=False, mandatory=()): """Inject the specified items into a disk image. If an item name is not specified in the MANDATORY iterable, then a warning is logged on failure to inject that item, rather than raising an exception. it will mount the image as a fully partitioned disk and attempt to inject into the specified partition number. If PARTITION is not specified the image is mounted as a single partition. Returns True if all requested operations completed without issue. Raises an exception if a mandatory item can't be injected. 
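    An illustrative call (hypothetical paths and values, not from the
    original docstring):

        inject_data('/var/lib/nova/instances/<uuid>/disk',
                    key=ssh_public_key, partition=1, use_cow=True,
                    mandatory=('key',))

    raises if the key cannot be injected, while items not listed in
    mandatory would only log a warning on failure.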
""" LOG.debug(_("Inject data image=%(image)s key=%(key)s net=%(net)s " "metadata=%(metadata)s admin_password= " "files=%(files)s partition=%(partition)s use_cow=%(use_cow)s"), {'image': image, 'key': key, 'net': net, 'metadata': metadata, 'files': files, 'partition': partition, 'use_cow': use_cow}) fmt = "raw" if use_cow: fmt = "qcow2" try: # Note(mrda): Test if the image exists first to short circuit errors os.stat(image) fs = vfs.VFS.instance_for_image(image, fmt, partition) fs.setup() except Exception as e: # If a mandatory item is passed to this function, # then reraise the exception to indicate the error. for inject in mandatory: inject_val = locals()[inject] if inject_val: raise LOG.warn(_('Ignoring error injecting data into image ' '(%(e)s)'), {'e': e}) return False try: return inject_data_into_fs(fs, key, net, metadata, admin_password, files, mandatory) finally: fs.teardown() def setup_container(image, container_dir, use_cow=False): """Setup the LXC container. It will mount the loopback image to the container directory in order to create the root filesystem for the container. Returns path of image device which is mounted to the container directory. """ img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir) dev = img.mount() if dev is None: LOG.error(_("Failed to mount container filesystem '%(image)s' " "on '%(target)s': %(errors)s"), {"image": img, "target": container_dir, "errors": img.errors}) raise exception.NovaException(img.errors) return dev def teardown_container(container_dir, container_root_device=None): """Teardown the container rootfs mounting once it is spawned. It will umount the container that is mounted, and delete any linked devices. """ try: img = _DiskImage(image=None, mount_dir=container_dir) img.teardown() # Make sure container_root_device is released when teardown container. if container_root_device: if 'loop' in container_root_device: LOG.debug(_("Release loop device %s"), container_root_device) utils.execute('losetup', '--detach', container_root_device, run_as_root=True, attempts=3) else: LOG.debug(_('Release nbd device %s'), container_root_device) utils.execute('qemu-nbd', '-d', container_root_device, run_as_root=True) except Exception as exn: LOG.exception(_('Failed to teardown container filesystem: %s'), exn) def clean_lxc_namespace(container_dir): """Clean up the container namespace rootfs mounting one spawned. It will umount the mounted names that are mounted but leave the linked devices alone. """ try: img = _DiskImage(image=None, mount_dir=container_dir) img.umount() except Exception as exn: LOG.exception(_('Failed to umount container filesystem: %s'), exn) def inject_data_into_fs(fs, key, net, metadata, admin_password, files, mandatory=()): """Injects data into a filesystem already mounted by the caller. Virt connections can call this directly if they mount their fs in a different way to inject_data. If an item name is not specified in the MANDATORY iterable, then a warning is logged on failure to inject that item, rather than raising an exception. Returns True if all requested operations completed without issue. Raises an exception if a mandatory item can't be injected. 
""" status = True for inject in ('key', 'net', 'metadata', 'admin_password', 'files'): inject_val = locals()[inject] inject_func = globals()['_inject_%s_into_fs' % inject] if inject_val: try: inject_func(inject_val, fs) except Exception as e: if inject in mandatory: raise LOG.warn(_('Ignoring error injecting %(inject)s into image ' '(%(e)s)'), {'e': e, 'inject': inject}) status = False return status def _inject_files_into_fs(files, fs): for (path, contents) in files: # NOTE(wangpan): Ensure the parent dir of injecting file exists parent_dir = os.path.dirname(path) if (len(parent_dir) > 0 and parent_dir != "/" and not fs.has_file(parent_dir)): fs.make_path(parent_dir) fs.set_ownership(parent_dir, "root", "root") fs.set_permissions(parent_dir, 0o744) _inject_file_into_fs(fs, path, contents) def _inject_file_into_fs(fs, path, contents, append=False): LOG.debug(_("Inject file fs=%(fs)s path=%(path)s append=%(append)s"), {'fs': fs, 'path': path, 'append': append}) if append: fs.append_file(path, contents) else: fs.replace_file(path, contents) def _inject_metadata_into_fs(metadata, fs): LOG.debug(_("Inject metadata fs=%(fs)s metadata=%(metadata)s"), {'fs': fs, 'metadata': metadata}) _inject_file_into_fs(fs, 'meta.js', jsonutils.dumps(metadata)) def _setup_selinux_for_keys(fs, sshdir): """Get selinux guests to ensure correct context on injected keys.""" if not fs.has_file(os.path.join("etc", "selinux")): return rclocal = os.path.join('etc', 'rc.local') rc_d = os.path.join('etc', 'rc.d') if not fs.has_file(rclocal) and fs.has_file(rc_d): rclocal = os.path.join(rc_d, 'rc.local') # Note some systems end rc.local with "exit 0" # and so to append there you'd need something like: # utils.execute('sed', '-i', '${/^exit 0$/d}' rclocal, run_as_root=True) restorecon = [ '\n', '# Added by Nova to ensure injected ssh keys have the right context\n', 'restorecon -RF %s 2>/dev/null || :\n' % sshdir, ] if not fs.has_file(rclocal): restorecon.insert(0, '#!/bin/sh') _inject_file_into_fs(fs, rclocal, ''.join(restorecon), append=True) fs.set_permissions(rclocal, 0o700) def _inject_key_into_fs(key, fs): """Add the given public ssh key to root's authorized_keys. key is an ssh key string. fs is the path to the base of the filesystem into which to inject the key. """ LOG.debug(_("Inject key fs=%(fs)s key=%(key)s"), {'fs': fs, 'key': key}) sshdir = os.path.join('root', '.ssh') fs.make_path(sshdir) fs.set_ownership(sshdir, "root", "root") fs.set_permissions(sshdir, 0o700) keyfile = os.path.join(sshdir, 'authorized_keys') key_data = ''.join([ '\n', '# The following ssh key was injected by Nova', '\n', key.strip(), '\n', ]) _inject_file_into_fs(fs, keyfile, key_data, append=True) fs.set_permissions(keyfile, 0o600) _setup_selinux_for_keys(fs, sshdir) def _inject_net_into_fs(net, fs): """Inject /etc/network/interfaces into the filesystem rooted at fs. net is the contents of /etc/network/interfaces. """ LOG.debug(_("Inject key fs=%(fs)s net=%(net)s"), {'fs': fs, 'net': net}) netdir = os.path.join('etc', 'network') fs.make_path(netdir) fs.set_ownership(netdir, "root", "root") fs.set_permissions(netdir, 0o744) netfile = os.path.join('etc', 'network', 'interfaces') _inject_file_into_fs(fs, netfile, net) def _inject_admin_password_into_fs(admin_passwd, fs): """Set the root password to admin_passwd admin_password is a root password fs is the path to the base of the filesystem into which to inject the key. This method modifies the instance filesystem directly, and does not require a guest agent running in the instance. 
""" # The approach used here is to copy the password and shadow # files from the instance filesystem to local files, make any # necessary changes, and then copy them back. LOG.debug(_("Inject admin password fs=%(fs)s " "admin_passwd="), {'fs': fs}) admin_user = 'root' passwd_path = os.path.join('etc', 'passwd') shadow_path = os.path.join('etc', 'shadow') passwd_data = fs.read_file(passwd_path) shadow_data = fs.read_file(shadow_path) new_shadow_data = _set_passwd(admin_user, admin_passwd, passwd_data, shadow_data) fs.replace_file(shadow_path, new_shadow_data) def _generate_salt(): salt_set = ('abcdefghijklmnopqrstuvwxyz' 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' '0123456789./') salt = 16 * ' ' return ''.join([random.choice(salt_set) for c in salt]) def _set_passwd(username, admin_passwd, passwd_data, shadow_data): """set the password for username to admin_passwd The passwd_file is not modified. The shadow_file is updated. if the username is not found in both files, an exception is raised. :param username: the username :param encrypted_passwd: the encrypted password :param passwd_file: path to the passwd file :param shadow_file: path to the shadow password file :returns: nothing :raises: exception.NovaException(), IOError() """ if os.name == 'nt': raise exception.NovaException(_('Not implemented on Windows')) # encryption algo - id pairs for crypt() algos = {'SHA-512': '$6$', 'SHA-256': '$5$', 'MD5': '$1$', 'DES': ''} salt = _generate_salt() # crypt() depends on the underlying libc, and may not support all # forms of hash. We try md5 first. If we get only 13 characters back, # then the underlying crypt() didn't understand the '$n$salt' magic, # so we fall back to DES. # md5 is the default because it's widely supported. Although the # local crypt() might support stronger SHA, the target instance # might not. encrypted_passwd = crypt.crypt(admin_passwd, algos['MD5'] + salt) if len(encrypted_passwd) == 13: encrypted_passwd = crypt.crypt(admin_passwd, algos['DES'] + salt) p_file = passwd_data.split("\n") s_file = shadow_data.split("\n") # username MUST exist in passwd file or it's an error found = False for entry in p_file: split_entry = entry.split(':') if split_entry[0] == username: found = True break if not found: msg = _('User %(username)s not found in password file.') raise exception.NovaException(msg % username) # update password in the shadow file.It's an error if the # the user doesn't exist. new_shadow = list() found = False for entry in s_file: split_entry = entry.split(':') if split_entry[0] == username: split_entry[1] = encrypted_passwd found = True new_entry = ':'.join(split_entry) new_shadow.append(new_entry) if not found: msg = _('User %(username)s not found in shadow file.') raise exception.NovaException(msg % username) return "\n".join(new_shadow) nova-2014.1.5/nova/virt/disk/vfs/0000775000567000056700000000000012540643452017533 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/disk/vfs/__init__.py0000664000567000056700000000115012540642532021637 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Operations on virtual filesystems """ nova-2014.1.5/nova/virt/disk/vfs/api.py0000664000567000056700000000771112540642544020665 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) class VFS(object): @staticmethod def instance_for_image(imgfile, imgfmt, partition): LOG.debug(_("Instance for image imgfile=%(imgfile)s " "imgfmt=%(imgfmt)s partition=%(partition)s"), {'imgfile': imgfile, 'imgfmt': imgfmt, 'partition': partition}) hasGuestfs = False try: LOG.debug(_("Trying to import guestfs")) importutils.import_module("guestfs") hasGuestfs = True except Exception: pass if hasGuestfs: LOG.debug(_("Using primary VFSGuestFS")) return importutils.import_object( "nova.virt.disk.vfs.guestfs.VFSGuestFS", imgfile, imgfmt, partition) else: LOG.debug(_("Falling back to VFSLocalFS")) return importutils.import_object( "nova.virt.disk.vfs.localfs.VFSLocalFS", imgfile, imgfmt, partition) """ The VFS class defines an interface for manipulating files within a virtual disk image filesystem. This allows file injection code to avoid the assumption that the virtual disk image can be mounted in the host filesystem. All paths provided to the APIs in this class should be relative to the root of the virtual disk image filesystem. Subclasses will translate paths as required by their implementation. """ def __init__(self, imgfile, imgfmt, partition): self.imgfile = imgfile self.imgfmt = imgfmt self.partition = partition """ Perform any one-time setup tasks to make the virtual filesystem available to future API calls """ def setup(self): pass """ Release all resources initialized in the setup method """ def teardown(self): pass """ Create a directory @path, including all intermedia path components if they do not already exist """ def make_path(self, path): pass """ Append @content to the end of the file identified by @path, creating the file if it does not already exist """ def append_file(self, path, content): pass """ Replace the entire contents of the file identified by @path, with @content, creating the file if it does not already exist """ def replace_file(self, path, content): pass """ Return the entire contents of the file identified by @path """ def read_file(self, path): pass """ Return a True if the file identified by @path exists """ def has_file(self, path): pass """ Set the permissions on the file identified by @path to @mode. The file must exist prior to this call. """ def set_permissions(self, path, mode): pass """ Set the ownership on the file identified by @path to the username @user and groupname @group. Either of @user or @group may be None, in which case the current ownership will be left unchanged. 
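    For example (a sketch), ``fs.set_ownership('root/.ssh', 'root', None)``
    would change only the owning user and leave the group untouched.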
The ownership must be passed in string form, allowing subclasses to translate to uid/gid form as required. The file must exist prior to this call. """ def set_ownership(self, path, user, group): pass nova-2014.1.5/nova/virt/disk/vfs/guestfs.py0000664000567000056700000001754212540642544021577 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from eventlet import tpool from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.disk.vfs import api as vfs LOG = logging.getLogger(__name__) guestfs = None class VFSGuestFS(vfs.VFS): """This class implements a VFS module that uses the libguestfs APIs to access the disk image. The disk image is never mapped into the host filesystem, thus avoiding any potential for symlink attacks from the guest filesystem. """ def __init__(self, imgfile, imgfmt='raw', partition=None): super(VFSGuestFS, self).__init__(imgfile, imgfmt, partition) global guestfs if guestfs is None: guestfs = __import__('guestfs') self.handle = None def setup_os(self): if self.partition == -1: self.setup_os_inspect() else: self.setup_os_static() def setup_os_static(self): LOG.debug(_("Mount guest OS image %(imgfile)s partition %(part)s"), {'imgfile': self.imgfile, 'part': str(self.partition)}) if self.partition: self.handle.mount_options("", "/dev/sda%d" % self.partition, "/") else: self.handle.mount_options("", "/dev/sda", "/") def setup_os_inspect(self): LOG.debug(_("Inspecting guest OS image %s"), self.imgfile) roots = self.handle.inspect_os() if len(roots) == 0: raise exception.NovaException(_("No operating system found in %s") % self.imgfile) if len(roots) != 1: LOG.debug(_("Multi-boot OS %(roots)s") % {'roots': str(roots)}) raise exception.NovaException( _("Multi-boot operating system found in %s") % self.imgfile) self.setup_os_root(roots[0]) def setup_os_root(self, root): LOG.debug(_("Inspecting guest OS root filesystem %s"), root) mounts = self.handle.inspect_get_mountpoints(root) if len(mounts) == 0: raise exception.NovaException( _("No mount points found in %(root)s of %(imgfile)s") % {'root': root, 'imgfile': self.imgfile}) # the root directory must be mounted first mounts.sort(key=lambda mount: mount[0]) root_mounted = False for mount in mounts: LOG.debug(_("Mounting %(dev)s at %(dir)s") % {'dev': mount[1], 'dir': mount[0]}) try: self.handle.mount_options("", mount[1], mount[0]) root_mounted = True except RuntimeError as e: msg = _("Error mounting %(device)s to %(dir)s in image" " %(imgfile)s with libguestfs (%(e)s)") % \ {'imgfile': self.imgfile, 'device': mount[1], 'dir': mount[0], 'e': e} if root_mounted: LOG.debug(msg) else: raise exception.NovaException(msg) def setup(self): LOG.debug(_("Setting up appliance for %(imgfile)s %(imgfmt)s") % {'imgfile': self.imgfile, 'imgfmt': self.imgfmt}) try: self.handle = tpool.Proxy(guestfs.GuestFS(close_on_exit=False)) except TypeError as e: if 'close_on_exit' in str(e): # NOTE(russellb) In case we're not 
using a version of # libguestfs new enough to support the close_on_exit paramater, # which was added in libguestfs 1.20. self.handle = tpool.Proxy(guestfs.GuestFS()) else: raise try: self.handle.add_drive_opts(self.imgfile, format=self.imgfmt) self.handle.launch() self.setup_os() self.handle.aug_init("/", 0) except RuntimeError as e: # explicitly teardown instead of implicit close() # to prevent orphaned VMs in cases when an implicit # close() is not enough self.teardown() raise exception.NovaException( _("Error mounting %(imgfile)s with libguestfs (%(e)s)") % {'imgfile': self.imgfile, 'e': e}) except Exception: # explicitly teardown instead of implicit close() # to prevent orphaned VMs in cases when an implicit # close() is not enough self.teardown() raise def teardown(self): LOG.debug(_("Tearing down appliance")) try: try: self.handle.aug_close() except RuntimeError as e: LOG.warn(_("Failed to close augeas %s"), e) try: self.handle.shutdown() except AttributeError: # Older libguestfs versions haven't an explicit shutdown pass except RuntimeError as e: LOG.warn(_("Failed to shutdown appliance %s"), e) try: self.handle.close() except AttributeError: # Older libguestfs versions haven't an explicit close pass except RuntimeError as e: LOG.warn(_("Failed to close guest handle %s"), e) finally: # dereference object and implicitly close() self.handle = None @staticmethod def _canonicalize_path(path): if path[0] != '/': return '/' + path return path def make_path(self, path): LOG.debug(_("Make directory path=%s"), path) path = self._canonicalize_path(path) self.handle.mkdir_p(path) def append_file(self, path, content): LOG.debug(_("Append file path=%s"), path) path = self._canonicalize_path(path) self.handle.write_append(path, content) def replace_file(self, path, content): LOG.debug(_("Replace file path=%s"), path) path = self._canonicalize_path(path) self.handle.write(path, content) def read_file(self, path): LOG.debug(_("Read file path=%s"), path) path = self._canonicalize_path(path) return self.handle.read_file(path) def has_file(self, path): LOG.debug(_("Has file path=%s"), path) path = self._canonicalize_path(path) try: self.handle.stat(path) return True except RuntimeError: return False def set_permissions(self, path, mode): LOG.debug(_("Set permissions path=%(path)s mode=%(mode)s"), {'path': path, 'mode': mode}) path = self._canonicalize_path(path) self.handle.chmod(mode, path) def set_ownership(self, path, user, group): LOG.debug(_("Set ownership path=%(path)s " "user=%(user)s group=%(group)s"), {'path': path, 'user': user, 'group': group}) path = self._canonicalize_path(path) uid = -1 gid = -1 if user is not None: uid = int(self.handle.aug_get( "/files/etc/passwd/" + user + "/uid")) if group is not None: gid = int(self.handle.aug_get( "/files/etc/group/" + group + "/gid")) LOG.debug(_("chown uid=%(uid)d gid=%(gid)s"), {'uid': uid, 'gid': gid}) self.handle.chown(uid, gid, path) nova-2014.1.5/nova/virt/disk/vfs/localfs.py0000664000567000056700000001330112540642544021527 0ustar jenkinsjenkins00000000000000# Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile from nova import exception from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.disk.mount import loop from nova.virt.disk.mount import nbd from nova.virt.disk.vfs import api as vfs LOG = logging.getLogger(__name__) class VFSLocalFS(vfs.VFS): """os.path.join() with safety check for injected file paths. Join the supplied path components and make sure that the resulting path we are injecting into is within the mounted guest fs. Trying to be clever and specifying a path with '..' in it will hit this safeguard. """ def _canonical_path(self, path): canonpath, _err = utils.execute( 'readlink', '-nm', os.path.join(self.imgdir, path.lstrip("/")), run_as_root=True) if not canonpath.startswith(os.path.realpath(self.imgdir) + '/'): raise exception.Invalid(_('File path %s not valid') % path) return canonpath """ This class implements a VFS module that is mapped to a virtual root directory present on the host filesystem. This implementation uses the nova.virt.disk.mount.Mount API to make virtual disk images visible in the host filesystem. If the disk format is raw, it will use the loopback mount impl, otherwise it will use the qemu-nbd impl. """ def __init__(self, imgfile, imgfmt="raw", partition=None, imgdir=None): super(VFSLocalFS, self).__init__(imgfile, imgfmt, partition) self.imgdir = imgdir self.mount = None def setup(self): self.imgdir = tempfile.mkdtemp(prefix="openstack-vfs-localfs") try: if self.imgfmt == "raw": LOG.debug(_("Using LoopMount")) mount = loop.LoopMount(self.imgfile, self.imgdir, self.partition) else: LOG.debug(_("Using NbdMount")) mount = nbd.NbdMount(self.imgfile, self.imgdir, self.partition) if not mount.do_mount(): raise exception.NovaException(mount.error) self.mount = mount except Exception as e: with excutils.save_and_reraise_exception(): LOG.debug(_("Failed to mount image %(ex)s)"), {'ex': str(e)}) self.teardown() def teardown(self): try: if self.mount: self.mount.do_teardown() except Exception as e: LOG.debug(_("Failed to unmount %(imgdir)s: %(ex)s") % {'imgdir': self.imgdir, 'ex': str(e)}) try: if self.imgdir: os.rmdir(self.imgdir) except Exception as e: LOG.debug(_("Failed to remove %(imgdir)s: %(ex)s") % {'imgdir': self.imgdir, 'ex': str(e)}) self.imgdir = None self.mount = None def make_path(self, path): LOG.debug(_("Make directory path=%s"), path) canonpath = self._canonical_path(path) utils.execute('mkdir', '-p', canonpath, run_as_root=True) def append_file(self, path, content): LOG.debug(_("Append file path=%s"), path) canonpath = self._canonical_path(path) args = ["-a", canonpath] kwargs = dict(process_input=content, run_as_root=True) utils.execute('tee', *args, **kwargs) def replace_file(self, path, content): LOG.debug(_("Replace file path=%s"), path) canonpath = self._canonical_path(path) args = [canonpath] kwargs = dict(process_input=content, run_as_root=True) utils.execute('tee', *args, **kwargs) def read_file(self, path): LOG.debug(_("Read file path=%s"), path) canonpath = self._canonical_path(path) return utils.read_file_as_root(canonpath) def has_file(self, path): LOG.debug(_("Has file path=%s"), path) canonpath = self._canonical_path(path) exists, _err = utils.trycmd('readlink', '-e', canonpath, run_as_root=True) return exists def set_permissions(self, path, mode): LOG.debug(_("Set permissions 
path=%(path)s mode=%(mode)o"), {'path': path, 'mode': mode}) canonpath = self._canonical_path(path) utils.execute('chmod', "%o" % mode, canonpath, run_as_root=True) def set_ownership(self, path, user, group): LOG.debug(_("Set permissions path=%(path)s " "user=%(user)s group=%(group)s"), {'path': path, 'user': user, 'group': group}) canonpath = self._canonical_path(path) owner = None cmd = "chown" if group is not None and user is not None: owner = user + ":" + group elif user is not None: owner = user elif group is not None: owner = group cmd = "chgrp" if owner is not None: utils.execute(cmd, owner, canonpath, run_as_root=True) nova-2014.1.5/nova/virt/driver.py0000664000567000056700000014752612540642544017670 0ustar jenkinsjenkins00000000000000# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Driver base-classes: (Beginning of) the contract that compute drivers must follow, and shared types that support that contract """ import sys from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import utils from nova.virt import event as virtevent driver_opts = [ cfg.StrOpt('compute_driver', help='Driver to use for controlling virtualization. Options ' 'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, ' 'fake.FakeDriver, baremetal.BareMetalDriver, ' 'vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver, ' 'hyperv.HyperVDriver'), cfg.StrOpt('default_ephemeral_format', help='The default format an ephemeral_volume will be ' 'formatted with on creation.'), cfg.StrOpt('preallocate_images', default='none', help='VM image preallocation mode: ' '"none" => no storage provisioning is done up front, ' '"space" => storage is fully allocated at instance start'), cfg.BoolOpt('use_cow_images', default=True, help='Whether to use cow images'), cfg.BoolOpt('vif_plugging_is_fatal', default=True, help="Fail instance boot if vif plugging fails"), cfg.IntOpt('vif_plugging_timeout', default=300, help='Number of seconds to wait for neutron vif plugging ' 'events to arrive before continuing or failing (see ' 'vif_plugging_is_fatal). 
If this is set to zero and ' 'vif_plugging_is_fatal is False, events should not ' 'be expected to arrive at all.'), ] CONF = cfg.CONF CONF.register_opts(driver_opts) LOG = logging.getLogger(__name__) def driver_dict_from_config(named_driver_config, *args, **kwargs): driver_registry = dict() for driver_str in named_driver_config: driver_type, _sep, driver = driver_str.partition('=') driver_class = importutils.import_class(driver) driver_registry[driver_type] = driver_class(*args, **kwargs) return driver_registry def block_device_info_get_root(block_device_info): block_device_info = block_device_info or {} return block_device_info.get('root_device_name') def block_device_info_get_swap(block_device_info): block_device_info = block_device_info or {} return block_device_info.get('swap') or {'device_name': None, 'swap_size': 0} def swap_is_usable(swap): return swap and swap['device_name'] and swap['swap_size'] > 0 def block_device_info_get_ephemerals(block_device_info): block_device_info = block_device_info or {} ephemerals = block_device_info.get('ephemerals') or [] return ephemerals def block_device_info_get_mapping(block_device_info): block_device_info = block_device_info or {} block_device_mapping = block_device_info.get('block_device_mapping') or [] return block_device_mapping class ComputeDriver(object): """Base class for compute drivers. The interface to this class talks in terms of 'instances' (Amazon EC2 and internal Nova terminology), by which we mean 'running virtual machine' (XenAPI terminology) or domain (Xen or libvirt terminology). An instance has an ID, which is the identifier chosen by Nova to represent the instance further up the stack. This is unfortunately also called a 'name' elsewhere. As far as this layer is concerned, 'instance ID' and 'instance name' are synonyms. Note that the instance ID or name is not human-readable or customer-controlled -- it's an internal ID chosen by Nova. At the nova.virt layer, instances do not have human-readable names at all -- such things are only known higher up the stack. Most virtualization platforms will also have their own identity schemes, to uniquely identify a VM or domain. These IDs must stay internal to the platform-specific layer, and never escape the connection interface. The platform-specific layer is responsible for keeping track of which instance ID maps to which platform-specific ID, and vice versa. Some methods here take an instance of nova.compute.service.Instance. This is the data structure used by nova.compute to store details regarding an instance, and pass them into this layer. This layer is responsible for translating that generic data structure into terms that are specific to the virtualization platform. """ capabilities = { "has_imagecache": False, "supports_recreate": False, } def __init__(self, virtapi): self.virtapi = virtapi self._compute_event_callback = None def init_host(self, host): """Initialize anything that is necessary for the driver to function, including catching up with currently running VM's on the given host. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def cleanup_host(self, host): """Clean up anything that is necessary for the driver gracefully stop, including ending remote sessions. This is optional. """ pass def get_info(self, instance): """Get the current status of an instance, by name (not ID!) 
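
        For example, a driver might report (a sketch; power_state comes
        from nova.compute)::

            {'state': power_state.RUNNING,
             'max_mem': 2097152,
             'mem': 524288,
             'num_cpu': 2,
             'cpu_time': 123456789}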
:param instance: nova.objects.instance.Instance object Returns a dict containing: :state: the running state, one of the power_state codes :max_mem: (int) the maximum memory in KBytes allowed :mem: (int) the memory in KBytes used by the domain :num_cpu: (int) the number of virtual CPUs for the domain :cpu_time: (int) the CPU time used in nanoseconds """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_num_instances(self): """Return the total number of virtual machines. Return the number of virtual machines that the hypervisor knows about. .. note:: This implementation works for all drivers, but it is not particularly efficient. Maintainers of the virt drivers are encouraged to override this method with something more efficient. """ return len(self.list_instances()) def instance_exists(self, instance_id): """Checks existence of an instance on the host. :param instance_id: The ID / name of the instance to lookup Returns True if an instance with the supplied ID exists on the host, False otherwise. .. note:: This implementation works for all drivers, but it is not particularly efficient. Maintainers of the virt drivers are encouraged to override this method with something more efficient. """ return instance_id in self.list_instances() def estimate_instance_overhead(self, instance_info): """Estimate the virtualization overhead required to build an instance of the given flavor. Defaults to zero, drivers should override if per-instance overhead calculations are desired. :param instance_info: Instance/flavor to calculate overhead for. :returns: Dict of estimated overhead values. """ return {'memory_mb': 0} def list_instances(self): """Return the names of all the instances known to the virtualization layer, as a list. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def list_instance_uuids(self): """Return the UUIDS of all the instances known to the virtualization layer, as a list. """ raise NotImplementedError() def rebuild(self, context, instance, image_meta, injected_files, admin_password, bdms, detach_block_devices, attach_block_devices, network_info=None, recreate=False, block_device_info=None, preserve_ephemeral=False): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and remakes the VM with given 'metadata' and 'personalities'. This base class method shuts down the VM, detaches all block devices, then spins up the new VM afterwards. It may be overridden by hypervisors that need to - e.g. for optimisations, or when the 'VM' is actually proxied and needs to be held across the shutdown + spin up steps. :param context: security context :param instance: nova.objects.instance.Instance This function should use the data there to guide the creation of the new instance. :param image_meta: image object returned by nova.image.glance that defines the image from which to boot this instance :param injected_files: User files to inject into instance. :param admin_password: Administrator password to set in instance. :param bdms: block-device-mappings to use for rebuild :param detach_block_devices: function to detach block devices. See nova.compute.manager.ComputeManager:_rebuild_default_impl for usage. :param attach_block_devices: function to attach block devices. See nova.compute.manager.ComputeManager:_rebuild_default_impl for usage. 
:param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param recreate: True if the instance is being recreated on a new hypervisor - all the cleanup of old state is skipped. :param block_device_info: Information about block devices to be attached to the instance. :param preserve_ephemeral: True if the default ephemeral storage partition must be preserved on rebuild """ raise NotImplementedError() def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): """Create a new instance/VM/domain on the virtualization platform. Once this successfully completes, the instance should be running (power_state.RUNNING). If this fails, any partial instance should be completely cleaned up, and the virtualization platform should be in the state that it was before this call began. :param context: security context :param instance: nova.objects.instance.Instance This function should use the data there to guide the creation of the new instance. :param image_meta: image object returned by nova.image.glance that defines the image from which to boot this instance :param injected_files: User files to inject into instance. :param admin_password: Administrator password to set in instance. :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param block_device_info: Information about block devices to be attached to the instance. """ raise NotImplementedError() def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): """Destroy the specified instance from the Hypervisor. If the instance is not found (for example if networking failed), this function should still succeed. It's probably a good idea to log a warning in that case. :param context: security context :param instance: Instance object as returned by DB layer. :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param block_device_info: Information about block devices that should be detached from the instance. :param destroy_disks: Indicates if disks should be destroyed """ raise NotImplementedError() def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True): """Cleanup the instance resources . Instance should have been destroyed from the Hypervisor before calling this method. :param context: security context :param instance: Instance object as returned by DB layer. :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param block_device_info: Information about block devices that should be detached from the instance. :param destroy_disks: Indicates if disks should be destroyed """ raise NotImplementedError() def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): """Reboot the specified instance. After this is called successfully, the instance's state goes back to power_state.RUNNING. The virtualization platform should ensure that the reboot action has completed successfully even in cases in which the underlying domain/vm is paused or halted/stopped. 
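
        A concrete driver typically branches on the requested type,
        along these lines (a sketch only; the helper names are
        illustrative, not part of this interface)::

            if reboot_type == 'SOFT':
                self._soft_reboot(instance)
            else:
                self._hard_reboot(context, instance, network_info,
                                  block_device_info)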
:param instance: nova.objects.instance.Instance :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param reboot_type: Either a HARD or SOFT reboot :param block_device_info: Info pertaining to attached volumes :param bad_volumes_callback: Function to handle any bad volumes encountered """ raise NotImplementedError() def get_console_pool_info(self, console_type): # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_console_output(self, context, instance): """Get console output for an instance :param context: security context :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def get_vnc_console(self, context, instance): """Get connection info for a vnc console. :param context: security context :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def get_spice_console(self, context, instance): """Get connection info for a spice console. :param context: security context :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def get_rdp_console(self, context, instance): """Get connection info for a rdp console. :param context: security context :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def get_diagnostics(self, instance): """Return data about VM diagnostics. :param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_all_bw_counters(self, instances): """Return bandwidth usage counters for each interface on each running VM. :param instances: nova.objects.instance.InstanceList """ raise NotImplementedError() def get_all_volume_usage(self, context, compute_host_bdms): """Return usage info for volumes attached to vms on a given host.- """ raise NotImplementedError() def get_host_ip_addr(self): """Retrieves the IP address of the dom0 """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): """Attach the disk to the instance at mountpoint using info.""" raise NotImplementedError() def detach_volume(self, connection_info, instance, mountpoint, encryption=None): """Detach the disk attached to the instance.""" raise NotImplementedError() def swap_volume(self, old_connection_info, new_connection_info, instance, mountpoint): """Replace the disk attached to the instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def attach_interface(self, instance, image_meta, vif): """Attach an interface to the instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def detach_interface(self, instance, vif): """Detach an interface from the instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None): """Transfers the disk of a running instance in multiple phases, turning off the instance before the end. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def snapshot(self, context, instance, image_id, update_task_state): """Snapshots the specified instance. :param context: security context :param instance: nova.objects.instance.Instance :param image_id: Reference to a pre-created image that will hold the snapshot. 
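
        The compute manager drives the snapshot through the
        update_task_state callback, roughly as follows (a sketch,
        assuming task states from nova.compute.task_states)::

            def update_task_state(task_state, expected_state=None):
                instance.task_state = task_state
                instance.save(expected_task_state=expected_state)

            driver.snapshot(context, instance, image_id, update_task_state)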
""" raise NotImplementedError() def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): """Completes a resize. :param context: the context for the migration/resize :param migration: the migrate/resize information :param instance: nova.objects.instance.Instance being migrated/resized :param disk_info: the newly transferred disk information :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param image_meta: image object returned by nova.image.glance that defines the image from which this instance was created :param resize_instance: True if the instance is being resized, False otherwise :param block_device_info: instance volume block device info :param power_on: True if the instance should be powered on, False otherwise """ raise NotImplementedError() def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM. :param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): """Finish reverting a resize. :param context: the context for the finish_revert_migration :param instance: nova.objects.instance.Instance being migrated/resized :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param block_device_info: instance volume block device info :param power_on: True if the instance should be powered on, False otherwise """ raise NotImplementedError() def pause(self, instance): """Pause the specified instance. :param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def unpause(self, instance): """Unpause paused VM instance. :param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def suspend(self, instance): """suspend the specified instance. :param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def resume(self, context, instance, network_info, block_device_info=None): """resume the specified instance. :param context: the context for the resume :param instance: nova.objects.instance.Instance being resumed :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param block_device_info: instance volume block device info """ raise NotImplementedError() def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """resume guest state when a host is booted. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def rescue(self, context, instance, network_info, image_meta, rescue_password): """Rescue the specified instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def set_bootable(self, instance, is_bootable): """Set the ability to power on/off an instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def unrescue(self, instance, network_info): """Unrescue the specified instance. 
:param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def power_off(self, instance): """Power off the specified instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def power_on(self, context, instance, network_info, block_device_info=None): """Power on the specified instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def soft_delete(self, instance): """Soft delete the specified instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def restore(self, instance): """Restore the specified instance. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def get_available_resource(self, nodename): """Retrieve resource information. This method is called when nova-compute launches, and as part of a periodic task that records the results in the DB. :param nodename: node which the caller want to get resources from a driver that manages only one node can safely ignore this :returns: Dictionary describing resources """ raise NotImplementedError() def pre_live_migration(self, ctxt, instance, block_device_info, network_info, disk_info, migrate_data=None): """Prepare an instance for live migration :param ctxt: security context :param instance: nova.objects.instance.Instance object :param block_device_info: instance block device information :param network_info: instance network information :param disk_info: instance disk information :param migrate_data: implementation specific data dict. """ raise NotImplementedError() def live_migration(self, ctxt, instance_ref, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Live migration of an instance to another host. :param ctxt: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param post_method: post operation method. expected nova.compute.manager.post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :param block_migration: if true, migrate VM disk. :param migrate_data: implementation specific params. """ raise NotImplementedError() def rollback_live_migration_at_destination(self, ctxt, instance_ref, network_info, block_device_info): """Clean up destination node after a failed live migration. :param ctxt: security context :param instance_ref: instance object that was being migrated :param network_info: instance network information :param block_device_info: instance block device information """ raise NotImplementedError() def post_live_migration(self, ctxt, instance_ref, block_device_info, migrate_data=None): """Post operation of live migration at source host. :param ctxt: security context :instance_ref: instance object that was migrated :block_device_info: instance block device information :param migrate_data: if not None, it is a dict which has data """ pass def post_live_migration_at_destination(self, ctxt, instance_ref, network_info, block_migration=False, block_device_info=None): """Post operation of live migration at destination host. :param ctxt: security context :param instance_ref: instance object that is migrated :param network_info: instance network information :param block_migration: if true, post operation of block_migration. 
""" raise NotImplementedError() def check_instance_shared_storage_local(self, ctxt, instance): """Check if instance files located on shared storage. This runs check on the destination host, and then calls back to the source host to check the results. :param ctxt: security context :param instance: nova.db.sqlalchemy.models.Instance """ raise NotImplementedError() def check_instance_shared_storage_remote(self, ctxt, data): """Check if instance files located on shared storage. :param context: security context :param data: result of check_instance_shared_storage_local """ raise NotImplementedError() def check_instance_shared_storage_cleanup(self, ctxt, data): """Do cleanup on host after check_instance_shared_storage calls :param ctxt: security context :param data: result of check_instance_shared_storage_local """ pass def check_can_live_migrate_destination(self, ctxt, instance_ref, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): """Check if it is possible to execute live migration. This runs checks on the destination host, and then calls back to the source host to check the results. :param ctxt: security context :param instance_ref: nova.db.sqlalchemy.models.Instance :param src_compute_info: Info about the sending machine :param dst_compute_info: Info about the receiving machine :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit :returns: a dict containing migration info (hypervisor-dependent) """ raise NotImplementedError() def check_can_live_migrate_destination_cleanup(self, ctxt, dest_check_data): """Do required cleanup on dest host after check_can_live_migrate calls :param ctxt: security context :param dest_check_data: result of check_can_live_migrate_destination """ raise NotImplementedError() def check_can_live_migrate_source(self, context, instance_ref, dest_check_data, block_device_info=None): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance :param dest_check_data: result of check_can_live_migrate_destination :param block_device_info: result of _get_instance_block_device_info :returns: a dict containing migration info (hypervisor-dependent) """ raise NotImplementedError() def refresh_security_group_rules(self, security_group_id): """This method is called after a change to security groups. All security groups and their associated rules live in the datastore, and calling this method should apply the updated rules to instances running the specified security group. An error should be raised if the operation cannot complete. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_security_group_members(self, security_group_id): """This method is called when a security group is added to an instance. This message is sent to the virtualization drivers on hosts that are running an instance that belongs to a security group that has a rule that references the security group identified by `security_group_id`. It is the responsibility of this method to make sure any rules that authorize traffic flow with members of the security group are updated and any new members can communicate, and any removed members cannot. Scenario: * we are running on host 'H0' and we have an instance 'i-0'. 
* instance 'i-0' is a member of security group 'speaks-b' * group 'speaks-b' has an ingress rule that authorizes group 'b' * another host 'H1' runs an instance 'i-1' * instance 'i-1' is a member of security group 'b' When 'i-1' launches or terminates we will receive the message to update members of group 'b', at which time we will make any changes needed to the rules for instance 'i-0' to allow or deny traffic coming from 'i-1', depending on if it is being added or removed from the group. In this scenario, 'i-1' could just as easily have been running on our host 'H0' and this method would still have been called. The point was that this method isn't called on the host where instances of that group are running (as is the case with :py:meth:`refresh_security_group_rules`) but is called where references are made to authorizing those instances. An error should be raised if the operation cannot complete. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_provider_fw_rules(self): """This triggers a firewall update based on database changes. When this is called, rules have either been added or removed from the datastore. You can retrieve rules with :py:meth:`nova.db.provider_fw_rule_get_all`. Provider rules take precedence over security group rules. If an IP would be allowed by a security group ingress rule, but blocked by a provider rule, then packets from the IP are dropped. This includes intra-project traffic in the case of the allow_project_net_traffic flag for the libvirt-derived classes. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def refresh_instance_security_rules(self, instance): """Refresh security group rules Gets called when an instance gets added to or removed from the security group the instance is a member of or if the group gains or loses a rule. """ raise NotImplementedError() def reset_network(self, instance): """reset networking for specified instance.""" # TODO(Vek): Need to pass context in for access to auth_token pass def ensure_filtering_rules_for_instance(self, instance, network_info): """Setting up filtering rules and waiting for its completion. To migrate an instance, filtering rules to hypervisors and firewalls are inevitable on destination host. ( Waiting only for filtering rules to hypervisor, since filtering rules to firewall rules can be set faster). Concretely, the below method must be called. - setup_basic_filtering (for nova-basic, etc.) - prepare_instance_filter(for nova-instance-instance-xxx, etc.) to_xml may have to be called since it defines PROJNET, PROJMASK. but libvirt migrates those value through migrateToURI(), so , no need to be called. Don't use thread for this method since migration should not be started when setting-up filtering rules operations are not completed. :param instance: nova.objects.instance.Instance object """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def filter_defer_apply_on(self): """Defer application of IPTables rules.""" pass def filter_defer_apply_off(self): """Turn off deferral of IPTables rules and apply the rules now.""" pass def unfilter_instance(self, instance, network_info): """Stop filtering instance.""" # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def set_admin_password(self, context, instance, new_pass=None): """Set the root password on the specified instance. 
:param instance: nova.objects.instance.Instance :param new_password: the new password """ raise NotImplementedError() def inject_file(self, instance, b64_path, b64_contents): """Writes a file on the specified instance. The first parameter is an instance of nova.compute.service.Instance, and so the instance is being specified as instance.name. The second parameter is the base64-encoded path to which the file is to be written on the instance; the third is the contents of the file, also base64-encoded. NOTE(russellb) This method is deprecated and will be removed once it can be removed from nova.compute.manager. """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def change_instance_metadata(self, context, instance, diff): """Applies a diff to the instance metadata. This is an optional driver method which is used to publish changes to the instance's metadata to the hypervisor. If the hypervisor has no means of publishing the instance metadata to the instance, then this method should not be implemented. :param context: security context :param instance: nova.objects.instance.Instance """ pass def inject_network_info(self, instance, nw_info): """inject network info for specified instance.""" # TODO(Vek): Need to pass context in for access to auth_token pass def poll_rebooting_instances(self, timeout, instances): """Poll for rebooting instances :param timeout: the currently configured timeout for considering rebooting instances to be stuck :param instances: instances that have been in rebooting state longer than the configured timeout """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def host_power_action(self, host, action): """Reboots, shuts down or powers up the host.""" raise NotImplementedError() def host_maintenance_mode(self, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. """ raise NotImplementedError() def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def get_host_uptime(self, host): """Returns the result of calling "uptime" on the target host.""" # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def plug_vifs(self, instance, network_info): """Plug VIFs into networks. :param instance: nova.objects.instance.Instance """ # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() def unplug_vifs(self, instance, network_info): """Unplug VIFs from networks. :param instance: nova.objects.instance.Instance """ raise NotImplementedError() def get_host_stats(self, refresh=False): """Return currently known host stats. If the hypervisor supports pci passthrough, the returned dictionary includes a key-value pair for it. The key of pci passthrough device is "pci_passthrough_devices" and the value is a json string for the list of assignable pci devices. Each device is a dictionary, with mandatory keys of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id', 'label' and other optional device specific information. Refer to the objects/pci_device.py for more idea of these keys. """ raise NotImplementedError() def get_host_cpu_stats(self): """Get the currently known host CPU stats. 
:returns: a dict containing the CPU stat info, eg: {'kernel': kern, 'idle': idle, 'user': user, 'iowait': wait, 'frequency': freq}, where kern and user indicate the cumulative CPU time (nanoseconds) spent by kernel and user processes respectively, idle indicates the cumulative idle CPU time (nanoseconds), wait indicates the cumulative I/O wait CPU time (nanoseconds), since the host is booting up; freq indicates the current CPU frequency (MHz). All values are long integers. """ raise NotImplementedError() def block_stats(self, instance_name, disk_id): """Return performance counters associated with the given disk_id on the given instance_name. These are returned as [rd_req, rd_bytes, wr_req, wr_bytes, errs], where rd indicates read, wr indicates write, req is the total number of I/O requests made, bytes is the total number of bytes transferred, and errs is the number of requests held up due to a full pipeline. All counters are long integers. This method is optional. On some platforms (e.g. XenAPI) performance statistics can be retrieved directly in aggregate form, without Nova having to do the aggregation. On those platforms, this method is unused. Note that this function takes an instance ID. """ raise NotImplementedError() def interface_stats(self, instance_name, iface_id): """Return performance counters associated with the given iface_id on the given instance_id. These are returned as [rx_bytes, rx_packets, rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx indicates receive, tx indicates transmit, bytes and packets indicate the total number of bytes or packets transferred, and errs and dropped is the total number of packets failed / dropped. All counters are long integers. This method is optional. On some platforms (e.g. XenAPI) performance statistics can be retrieved directly in aggregate form, without Nova having to do the aggregation. On those platforms, this method is unused. Note that this function takes an instance ID. """ raise NotImplementedError() def macs_for_instance(self, instance): """What MAC addresses must this instance have? Some hypervisors (such as bare metal) cannot do freeform virtualisation of MAC addresses. This method allows drivers to return a set of MAC addresses that the instance is to have. allocate_for_instance will take this into consideration when provisioning networking for the instance. Mapping of MAC addresses to actual networks (or permitting them to be freeform) is up to the network implementation layer. For instance, with openflow switches, fixed MAC addresses can still be virtualised onto any L2 domain, with arbitrary VLANs etc, but regular switches require pre-configured MAC->network mappings that will match the actual configuration. Most hypervisors can use the default implementation which returns None. Hypervisors with MAC limits should return a set of MAC addresses, which will be supplied to the allocate_for_instance call by the compute manager, and it is up to that call to ensure that all assigned network details are compatible with the set of MAC addresses. This is called during spawn_instance by the compute manager. :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])). None means 'no constraints', a set means 'these and only these MAC addresses'. """ return None def dhcp_options_for_instance(self, instance): """Get DHCP options for this instance. Some hypervisors (such as bare metal) require that instances boot from the network, and manage their own TFTP service. 
This requires passing the appropriate options out to the DHCP service. Most hypervisors can use the default implementation which returns None. This is called during spawn_instance by the compute manager. Note that the format of the return value is specific to Quantum client API. :return: None, or a set of DHCP options, eg: [{'opt_name': 'bootfile-name', 'opt_value': '/tftpboot/path/to/config'}, {'opt_name': 'server-ip-address', 'opt_value': '1.2.3.4'}, {'opt_name': 'tftp-server', 'opt_value': '1.2.3.4'} ] """ pass def manage_image_cache(self, context, all_instances): """Manage the driver's local image cache. Some drivers chose to cache images for instances on disk. This method is an opportunity to do management of that cache which isn't directly related to other calls into the driver. The prime example is to clean the cache and remove images which are no longer of interest. :param instances: nova.objects.instance.InstanceList """ pass def add_to_aggregate(self, context, aggregate, host, **kwargs): """Add a compute host to an aggregate.""" #NOTE(jogo) Currently only used for XenAPI-Pool raise NotImplementedError() def remove_from_aggregate(self, context, aggregate, host, **kwargs): """Remove a compute host from an aggregate.""" raise NotImplementedError() def undo_aggregate_operation(self, context, op, aggregate, host, set_error=True): """Undo for Resource Pools.""" raise NotImplementedError() def get_volume_connector(self, instance): """Get connector information for the instance for attaching to volumes. Connector information is a dictionary representing the ip of the machine that will be making the connection, the name of the iscsi initiator and the hostname of the machine as follows:: { 'ip': ip, 'initiator': initiator, 'host': hostname } """ raise NotImplementedError() def get_available_nodes(self, refresh=False): """Returns nodenames of all nodes managed by the compute service. This method is for multi compute-nodes support. If a driver supports multi compute-nodes, this method returns a list of nodenames managed by the service. Otherwise, this method should return [hypervisor_hostname]. """ stats = self.get_host_stats(refresh=refresh) if not isinstance(stats, list): stats = [stats] return [s['hypervisor_hostname'] for s in stats] def node_is_available(self, nodename): """Return whether this compute service manages a particular node.""" if nodename in self.get_available_nodes(): return True # Refresh and check again. return nodename in self.get_available_nodes(refresh=True) def get_per_instance_usage(self): """Get information about instance resource usage. :returns: dict of nova uuid => dict of usage info """ return {} def instance_on_disk(self, instance): """Checks access of instance files on the host. :param instance: nova.objects.instance.Instance to lookup Returns True if files of an instance with the supplied ID accessible on the host, False otherwise. .. note:: Used in rebuild for HA implementation and required for validation of access to instance shared disk files """ return False def register_event_listener(self, callback): """Register a callback to receive events. Register a callback to receive asynchronous event notifications from hypervisors. The callback will be invoked with a single parameter, which will be an instance of the nova.virt.event.Event class. """ self._compute_event_callback = callback def emit_event(self, event): """Dispatches an event to the compute manager. Invokes the event callback registered by the compute manager to dispatch the event. 
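
        A pairing sketch (the compute-manager callback shown here is
        only illustrative)::

            driver.register_event_listener(compute_manager.handle_events)
            # ... later, from a green thread ...
            driver.emit_event(virtevent.LifecycleEvent(
                instance_uuid, virtevent.EVENT_LIFECYCLE_STARTED))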
This must only be invoked from a green thread.
        """
        if not self._compute_event_callback:
            LOG.debug(_("Discarding event %s") % str(event))
            return

        if not isinstance(event, virtevent.Event):
            raise ValueError(
                _("Event must be an instance of nova.virt.event.Event"))

        try:
            LOG.debug(_("Emitting event %s") % str(event))
            self._compute_event_callback(event)
        except Exception as ex:
            LOG.error(_("Exception dispatching event %(event)s: %(ex)s"),
                      {'event': event, 'ex': ex})

    def delete_instance_files(self, instance):
        """Delete any lingering instance files for an instance.

        :param instance: nova.objects.instance.Instance
        :returns: True if the instance was deleted from disk, False otherwise.
        """
        return True

    @property
    def need_legacy_block_device_info(self):
        """Tell the caller if the driver requires legacy block device info.

        Tell the caller whether we expect the legacy format of block
        device info to be passed in to methods that expect it.
        """
        return True

    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
        """Snapshots volumes attached to a specified instance.

        :param context: request context
        :param instance: nova.objects.instance.Instance that has the volume
               attached
        :param volume_id: Volume to be snapshotted
        :param create_info: The data needed for nova to be able to attach
               to the volume. This is the same data format returned by
               Cinder's initialize_connection() API call. In the case of
               doing a snapshot, it is the image file Cinder expects to be
               used as the active disk after the snapshot operation has
               completed. There may be other data included as well that is
               needed for creating the snapshot.
        """
        raise NotImplementedError()

    def volume_snapshot_delete(self, context, instance, volume_id,
                               snapshot_id, delete_info):
        """Deletes a snapshot of a volume attached to a specified instance.

        :param context: request context
        :param instance: nova.objects.instance.Instance that has the volume
               attached
        :param volume_id: Attached volume associated with the snapshot
        :param snapshot_id: The snapshot to delete.
        :param delete_info: Volume backend technology specific data needed to
               be able to complete the snapshot. For example, in the case of
               qcow2 backed snapshots, this would include the file being
               merged, and the file being merged into (if appropriate).
        """
        raise NotImplementedError()

    def default_root_device_name(self, instance, image_meta, root_bdm):
        """Provide a default root device name for the driver."""
        raise NotImplementedError()

    def default_device_names_for_instance(self, instance, root_device_name,
                                          *block_device_lists):
        """Default the missing device names in the block device mapping."""
        raise NotImplementedError()


def load_compute_driver(virtapi, compute_driver=None):
    """Load a compute driver module.

    Load the compute driver module specified by the compute_driver
    configuration option or, if supplied, the driver name supplied as an
    argument.

    Compute driver constructors take a VirtAPI object as their first
    argument and this must be supplied.
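
    For example (a sketch; assumes a concrete VirtAPI implementation
    ``my_virtapi`` is at hand)::

        driver = load_compute_driver(my_virtapi, 'fake.FakeDriver')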
:param virtapi: a VirtAPI instance :param compute_driver: a compute driver name to override the config opt :returns: a ComputeDriver instance """ if not compute_driver: compute_driver = CONF.compute_driver if not compute_driver: LOG.error(_("Compute driver option required, but not specified")) sys.exit(1) LOG.info(_("Loading compute driver '%s'") % compute_driver) try: driver = importutils.import_object_ns('nova.virt', compute_driver, virtapi) return utils.check_isinstance(driver, ComputeDriver) except ImportError: LOG.exception(_("Unable to load the virtualization driver")) sys.exit(1) def compute_driver_matches(match): return CONF.compute_driver and CONF.compute_driver.endswith(match) nova-2014.1.5/nova/virt/imagehandler/0000775000567000056700000000000012540643452020423 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/imagehandler/__init__.py0000664000567000056700000001525212540642544022542 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handling of VM disk images by handlers. """ import urlparse from oslo.config import cfg import stevedore from nova import exception from nova.image import glance from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) image_opts = [ cfg.ListOpt('image_handlers', default=['download'], help='Specifies which image handler extension names to use ' 'for handling images. The first extension in the list ' 'which can handle the image with a suitable location ' 'will be used.'), ] CONF = cfg.CONF CONF.register_opts(image_opts) _IMAGE_HANDLERS = [] _IMAGE_HANDLERS_ASSO = {} def _image_handler_asso(handler, path, location, image_meta): _IMAGE_HANDLERS_ASSO[path] = (handler, location, image_meta) def _image_handler_disasso(handler, path): _IMAGE_HANDLERS_ASSO.pop(path, None) def _match_locations(locations, schemes): matched = [] if locations and (schemes is not None): for loc in locations: # Note(zhiyan): location = {'url': 'string', # 'metadata': {...}} if len(schemes) == 0: # Note(zhiyan): the handler has no scheme limitation. matched.append(loc) elif urlparse.urlparse(loc['url']).scheme in schemes: matched.append(loc) return matched def load_image_handlers(driver): """Load and construct the user-configured image handlers. Handler objects are cached so that each handler instance remains a singleton. This structure needs to support sub-classing: a developer can implement a particular sub-class in the relevant hypervisor layer with more advanced functions. A handler's __init__() may need to do some re-preparation work when required; for example, when the nova-compute service restarts or the host reboots, a CinderImageHandler will need to re-prepare the iSCSI/FC links for volumes that were previously cached on the compute host as template images. """ global _IMAGE_HANDLERS, _IMAGE_HANDLERS_ASSO if _IMAGE_HANDLERS: _IMAGE_HANDLERS = [] _IMAGE_HANDLERS_ASSO = {} # De-duplicate handler names. (Could an ordereddict lib be used here to support both py26 and py27?)
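# An illustrative note on the de-duplication below (hypothetical option
# value; it assumes both names are registered under the
# 'nova.virt.image.handlers' entry point): with
# CONF.image_handlers = ['download', '', ' download ', 'cinder'],
# the loop strips whitespace, skips the empty entry and the duplicate,
# and loads the extensions in the order ['download', 'cinder'].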
processed_handler_names = [] ex = stevedore.extension.ExtensionManager('nova.virt.image.handlers') for name in CONF.image_handlers: if not name: continue name = name.strip() if name in processed_handler_names: LOG.warn(_("Duplicated handler extension name in 'image_handlers' " "option: %s, skip."), name) continue elif name not in ex.names(): LOG.warn(_("Invalid handler extension name in 'image_handlers' " "option: %s, skip."), name) continue processed_handler_names.append(name) try: mgr = stevedore.driver.DriverManager( namespace='nova.virt.image.handlers', name=name, invoke_on_load=True, invoke_kwds={"driver": driver, "associate_fn": _image_handler_asso, "disassociate_fn": _image_handler_disasso}) _IMAGE_HANDLERS.append(mgr.driver) except Exception as err: LOG.warn(_("Failed to import image handler extension " "%(name)s: %(err)s"), {'name': name, 'err': err}) def handle_image(context=None, image_id=None, user_id=None, project_id=None, target_path=None): """Handle an image using the available handlers. This generator yields each available handler in turn. :param context: Request context :param image_id: The opaque image identifier :param user_id: Request user id :param project_id: Request project id :param target_path: Where to write the image data :raises NoImageHandlerAvailable: if no image handler specified in the configuration is available for this request. """ handled = False if target_path is not None: target_path = target_path.strip() # Check if the target image has been handled before; # if so, the previous handler can process it again directly. if target_path and _IMAGE_HANDLERS_ASSO: ret = _IMAGE_HANDLERS_ASSO.get(target_path) if ret: (image_handler, location, image_meta) = ret yield image_handler, location, image_meta handled = image_handler.last_ops_handled() image_meta = None if not handled and _IMAGE_HANDLERS: if context and image_id: (image_service, _image_id) = glance.get_remote_image_service( context, image_id) image_meta = image_service.show(context, image_id) # Note(zhiyan): the image location property may not be # available, since Glance disables it by default. img_locs = image_service.get_locations(context, image_id) for image_handler in _IMAGE_HANDLERS: matched_locs = _match_locations(img_locs, image_handler.get_schemes()) for loc in matched_locs: yield image_handler, loc, image_meta handled = image_handler.last_ops_handled() if handled: return if not handled: # Note(zhiyan): fall back to the location-independent handlers. for image_handler in _IMAGE_HANDLERS: if len(image_handler.get_schemes()) == 0: yield image_handler, None, image_meta handled = image_handler.last_ops_handled() if handled: return if not handled: LOG.error(_("Cannot handle image: %(image_id)s %(target_path)s"), {'image_id': image_id, 'target_path': target_path}) raise exception.NoImageHandlerAvailable(image_id=image_id) nova-2014.1.5/nova/virt/fake.py0000664000567000056700000003675212540642544017275 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A fake (in-memory) hypervisor+api. Allows nova testing w/o a hypervisor. This module also documents the semantics of real hypervisor connections. """ import contextlib from oslo.config import cfg from nova.compute import power_state from nova.compute import task_states from nova import db from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import utils from nova.virt import driver from nova.virt import virtapi CONF = cfg.CONF CONF.import_opt('host', 'nova.netconf') LOG = logging.getLogger(__name__) _FAKE_NODES = None def set_nodes(nodes): """Sets FakeDriver's node.list. It has effect on the following methods: get_available_nodes() get_available_resource get_host_stats() To restore the change, call restore_nodes() """ global _FAKE_NODES _FAKE_NODES = nodes def restore_nodes(): """Resets FakeDriver's node list modified by set_nodes(). Usually called from tearDown(). """ global _FAKE_NODES _FAKE_NODES = [CONF.host] class FakeInstance(object): def __init__(self, name, state): self.name = name self.state = state def __getitem__(self, key): return getattr(self, key) class FakeDriver(driver.ComputeDriver): capabilities = { "has_imagecache": True, "supports_recreate": True, } """Fake hypervisor driver.""" def __init__(self, virtapi, read_only=False): super(FakeDriver, self).__init__(virtapi) self.instances = {} self.host_status_base = { 'vcpus': 100000, 'memory_mb': 8000000000, 'local_gb': 600000000000, 'vcpus_used': 0, 'memory_mb_used': 0, 'local_gb_used': 100000000000, 'hypervisor_type': 'fake', 'hypervisor_version': utils.convert_version_to_int('1.0'), 'hypervisor_hostname': CONF.host, 'cpu_info': {}, 'disk_available_least': 500000000000, 'supported_instances': [(None, 'fake', None)], } self._mounts = {} self._interfaces = {} if not _FAKE_NODES: set_nodes([CONF.host]) def init_host(self, host): return def list_instances(self): return self.instances.keys() def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" pass def unplug_vifs(self, instance, network_info): """Unplug VIFs from networks.""" pass def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): name = instance['name'] state = power_state.RUNNING fake_instance = FakeInstance(name, state) self.instances[name] = fake_instance def snapshot(self, context, instance, name, update_task_state): if instance['name'] not in self.instances: raise exception.InstanceNotRunning(instance_id=instance['uuid']) update_task_state(task_state=task_states.IMAGE_UPLOADING) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): pass @staticmethod def get_host_ip_addr(): return '192.168.0.1' def set_admin_password(self, instance, new_pass): pass def inject_file(self, instance, b64_path, b64_contents): pass def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): pass def rescue(self, context, instance, network_info, image_meta, rescue_password): pass def 
unrescue(self, instance, network_info): pass def poll_rebooting_instances(self, timeout, instances): pass def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None): pass def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): pass def post_live_migration_at_destination(self, context, instance, network_info, block_migration=False, block_device_info=None): pass def power_off(self, instance): pass def power_on(self, context, instance, network_info, block_device_info): pass def soft_delete(self, instance): pass def restore(self, instance): pass def pause(self, instance): pass def unpause(self, instance): pass def suspend(self, instance): pass def resume(self, context, instance, network_info, block_device_info=None): pass def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): key = instance['name'] if key in self.instances: del self.instances[key] else: LOG.warning(_("Key '%(key)s' not in instances '%(inst)s'") % {'key': key, 'inst': self.instances}, instance=instance) def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True): pass def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): """Attach the disk to the instance at mountpoint using info.""" instance_name = instance['name'] if instance_name not in self._mounts: self._mounts[instance_name] = {} self._mounts[instance_name][mountpoint] = connection_info return True def detach_volume(self, connection_info, instance, mountpoint, encryption=None): """Detach the disk attached to the instance.""" try: del self._mounts[instance['name']][mountpoint] except KeyError: pass return True def swap_volume(self, old_connection_info, new_connection_info, instance, mountpoint): """Replace the disk attached to the instance.""" instance_name = instance['name'] if instance_name not in self._mounts: self._mounts[instance_name] = {} self._mounts[instance_name][mountpoint] = new_connection_info return True def attach_interface(self, instance, image_meta, vif): if vif['id'] in self._interfaces: raise exception.InterfaceAttachFailed('duplicate') self._interfaces[vif['id']] = vif def detach_interface(self, instance, vif): try: del self._interfaces[vif['id']] except KeyError: raise exception.InterfaceDetachFailed('not attached') def get_info(self, instance): if instance['name'] not in self.instances: raise exception.InstanceNotFound(instance_id=instance['name']) i = self.instances[instance['name']] return {'state': i.state, 'max_mem': 0, 'mem': 0, 'num_cpu': 2, 'cpu_time': 0} def get_diagnostics(self, instance_name): return {'cpu0_time': 17300000000, 'memory': 524288, 'vda_errors': -1, 'vda_read': 262144, 'vda_read_req': 112, 'vda_write': 5778432, 'vda_write_req': 488, 'vnet1_rx': 2070139, 'vnet1_rx_drop': 0, 'vnet1_rx_errors': 0, 'vnet1_rx_packets': 26701, 'vnet1_tx': 140208, 'vnet1_tx_drop': 0, 'vnet1_tx_errors': 0, 'vnet1_tx_packets': 662, } def get_all_bw_counters(self, instances): """Return bandwidth usage counters for each interface on each running VM. """ bw = [] return bw def get_all_volume_usage(self, context, compute_host_bdms): """Return usage info for volumes attached to vms on a given host. 
""" volusage = [] return volusage def get_host_cpu_stats(self): stats = {'kernel': 5664160000000L, 'idle': 1592705190000000L, 'user': 26728850000000L, 'iowait': 6121490000000L} stats['frequency'] = 800 return stats def block_stats(self, instance_name, disk_id): return [0L, 0L, 0L, 0L, None] def interface_stats(self, instance_name, iface_id): return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] def get_console_output(self, context, instance): return 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE' def get_vnc_console(self, context, instance): return {'internal_access_path': 'FAKE', 'host': 'fakevncconsole.com', 'port': 6969} def get_spice_console(self, context, instance): return {'internal_access_path': 'FAKE', 'host': 'fakespiceconsole.com', 'port': 6969, 'tlsPort': 6970} def get_rdp_console(self, context, instance): return {'internal_access_path': 'FAKE', 'host': 'fakerdpconsole.com', 'port': 6969} def get_console_pool_info(self, console_type): return {'address': '127.0.0.1', 'username': 'fakeuser', 'password': 'fakepassword'} def refresh_security_group_rules(self, security_group_id): return True def refresh_security_group_members(self, security_group_id): return True def refresh_instance_security_rules(self, instance): return True def refresh_provider_fw_rules(self): pass def get_available_resource(self, nodename): """Updates compute manager resource info on ComputeNode table. Since we don't have a real hypervisor, pretend we have lots of disk and ram. """ if nodename not in _FAKE_NODES: return {} dic = {'vcpus': 1, 'memory_mb': 8192, 'local_gb': 1028, 'vcpus_used': 0, 'memory_mb_used': 0, 'local_gb_used': 0, 'hypervisor_type': 'fake', 'hypervisor_version': '1.0', 'hypervisor_hostname': nodename, 'disk_available_least': 0, 'cpu_info': '?', 'supported_instances': jsonutils.dumps([(None, 'fake', None)]) } return dic def ensure_filtering_rules_for_instance(self, instance_ref, network_info): return def get_instance_disk_info(self, instance_name): return def live_migration(self, context, instance_ref, dest, post_method, recover_method, block_migration=False, migrate_data=None): post_method(context, instance_ref, dest, block_migration, migrate_data) return def check_can_live_migrate_destination_cleanup(self, ctxt, dest_check_data): return def check_can_live_migrate_destination(self, ctxt, instance_ref, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): return {} def check_can_live_migrate_source(self, ctxt, instance_ref, dest_check_data, block_device_info=None): return def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): return def confirm_migration(self, migration, instance, network_info): return def pre_live_migration(self, context, instance_ref, block_device_info, network_info, disk, migrate_data=None): return def unfilter_instance(self, instance_ref, network_info): return def test_remove_vm(self, instance_name): """Removes the named VM, as if it crashed. 
For testing.""" self.instances.pop(instance_name) def get_host_stats(self, refresh=False): """Return fake Host Status of ram, disk, network.""" stats = [] for nodename in _FAKE_NODES: host_status = self.host_status_base.copy() host_status['hypervisor_hostname'] = nodename host_status['host_hostname'] = nodename host_status['host_name_label'] = nodename stats.append(host_status) if len(stats) == 0: raise exception.NovaException("FakeDriver has no node") elif len(stats) == 1: return stats[0] else: return stats def host_power_action(self, host, action): """Reboots, shuts down or powers up the host.""" return action def host_maintenance_mode(self, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. """ if not mode: return 'off_maintenance' return 'on_maintenance' def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" if enabled: return 'enabled' return 'disabled' def get_disk_available_least(self): pass def get_volume_connector(self, instance): return {'ip': '127.0.0.1', 'initiator': 'fake', 'host': 'fakehost'} def get_available_nodes(self, refresh=False): return _FAKE_NODES def instance_on_disk(self, instance): return False def list_instance_uuids(self): return [] class FakeVirtAPI(virtapi.VirtAPI): def instance_update(self, context, instance_uuid, updates): return db.instance_update_and_get_original(context, instance_uuid, updates) def provider_fw_rule_get_all(self, context): return db.provider_fw_rule_get_all(context) def agent_build_get_by_triple(self, context, hypervisor, os, architecture): return db.agent_build_get_by_triple(context, hypervisor, os, architecture) @contextlib.contextmanager def wait_for_instance_event(self, instance, event_names, deadline=300, error_callback=None): # NOTE(danms): Don't actually wait for any events, just # fall through yield nova-2014.1.5/nova/virt/hyperv/0000775000567000056700000000000012540643452017320 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/hyperv/migrationops.py0000664000567000056700000003107112540642544022410 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for migration / resize operations. 
""" import os from nova import exception from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from nova.virt import configdrive from nova.virt.hyperv import imagecache from nova.virt.hyperv import utilsfactory from nova.virt.hyperv import vmops from nova.virt.hyperv import vmutils from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) class MigrationOps(object): def __init__(self): self._hostutils = utilsfactory.get_hostutils() self._vmutils = utilsfactory.get_vmutils() self._vhdutils = utilsfactory.get_vhdutils() self._pathutils = utilsfactory.get_pathutils() self._volumeops = volumeops.VolumeOps() self._vmops = vmops.VMOps() self._imagecache = imagecache.ImageCache() def _migrate_disk_files(self, instance_name, disk_files, dest): # TODO(mikal): it would be nice if this method took a full instance, # because it could then be passed to the log messages below. same_host = False if dest in self._hostutils.get_local_ips(): same_host = True LOG.debug(_("Migration target is the source host")) else: LOG.debug(_("Migration target host: %s") % dest) instance_path = self._pathutils.get_instance_dir(instance_name) revert_path = self._pathutils.get_instance_migr_revert_dir( instance_name, remove_dir=True) dest_path = None try: if same_host: # Since source and target are the same, we copy the files to # a temporary location before moving them into place dest_path = '%s_tmp' % instance_path if self._pathutils.exists(dest_path): self._pathutils.rmtree(dest_path) self._pathutils.makedirs(dest_path) else: dest_path = self._pathutils.get_instance_dir( instance_name, dest, remove_dir=True) for disk_file in disk_files: # Skip the config drive as the instance is already configured if os.path.basename(disk_file).lower() != 'configdrive.vhd': LOG.debug(_('Copying disk "%(disk_file)s" to ' '"%(dest_path)s"'), {'disk_file': disk_file, 'dest_path': dest_path}) self._pathutils.copy(disk_file, dest_path) self._pathutils.rename(instance_path, revert_path) if same_host: self._pathutils.rename(dest_path, instance_path) except Exception: with excutils.save_and_reraise_exception(): self._cleanup_failed_disk_migration(instance_path, revert_path, dest_path) def _cleanup_failed_disk_migration(self, instance_path, revert_path, dest_path): try: if dest_path and self._pathutils.exists(dest_path): self._pathutils.rmtree(dest_path) if self._pathutils.exists(revert_path): self._pathutils.rename(revert_path, instance_path) except Exception as ex: # Log and ignore this exception LOG.exception(ex) LOG.error(_("Cannot cleanup migration files")) def _check_target_flavor(self, instance, flavor): new_root_gb = flavor['root_gb'] curr_root_gb = instance['root_gb'] if new_root_gb < curr_root_gb: raise exception.InstanceFaultRollback( vmutils.VHDResizeException( _("Cannot resize the root disk to a smaller size. " "Current size: %(curr_root_gb)s GB. 
Requested size: " "%(new_root_gb)s GB") % {'curr_root_gb': curr_root_gb, 'new_root_gb': new_root_gb})) def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None): LOG.debug(_("migrate_disk_and_power_off called"), instance=instance) self._check_target_flavor(instance, flavor) self._vmops.power_off(instance) instance_name = instance["name"] (disk_files, volume_drives) = self._vmutils.get_vm_storage_paths(instance_name) if disk_files: self._migrate_disk_files(instance_name, disk_files, dest) self._vmops.destroy(instance, destroy_disks=False) # disk_info is not used return "" def confirm_migration(self, migration, instance, network_info): LOG.debug(_("confirm_migration called"), instance=instance) self._pathutils.get_instance_migr_revert_dir(instance['name'], remove_dir=True) def _revert_migration_files(self, instance_name): instance_path = self._pathutils.get_instance_dir( instance_name, create_dir=False, remove_dir=True) revert_path = self._pathutils.get_instance_migr_revert_dir( instance_name) self._pathutils.rename(revert_path, instance_path) def _check_and_attach_config_drive(self, instance): if configdrive.required_by(instance): configdrive_path = self._pathutils.lookup_configdrive_path( instance.name) if configdrive_path: self._vmops.attach_config_drive(instance, configdrive_path) else: raise vmutils.HyperVException( _("Config drive is required by instance: %s, " "but it does not exist.") % instance.name) def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): LOG.debug(_("finish_revert_migration called"), instance=instance) instance_name = instance['name'] self._revert_migration_files(instance_name) if self._volumeops.ebs_root_in_block_devices(block_device_info): root_vhd_path = None else: root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name) self._vmops.create_instance(instance, network_info, block_device_info, root_vhd_path, eph_vhd_path) self._check_and_attach_config_drive(instance) if power_on: self._vmops.power_on(instance) def _merge_base_vhd(self, diff_vhd_path, base_vhd_path): base_vhd_copy_path = os.path.join(os.path.dirname(diff_vhd_path), os.path.basename(base_vhd_path)) try: LOG.debug(_('Copying base disk %(base_vhd_path)s to ' '%(base_vhd_copy_path)s'), {'base_vhd_path': base_vhd_path, 'base_vhd_copy_path': base_vhd_copy_path}) self._pathutils.copyfile(base_vhd_path, base_vhd_copy_path) LOG.debug(_("Reconnecting copied base VHD " "%(base_vhd_copy_path)s and diff " "VHD %(diff_vhd_path)s"), {'base_vhd_copy_path': base_vhd_copy_path, 'diff_vhd_path': diff_vhd_path}) self._vhdutils.reconnect_parent_vhd(diff_vhd_path, base_vhd_copy_path) LOG.debug(_("Merging base disk %(base_vhd_copy_path)s and " "diff disk %(diff_vhd_path)s"), {'base_vhd_copy_path': base_vhd_copy_path, 'diff_vhd_path': diff_vhd_path}) self._vhdutils.merge_vhd(diff_vhd_path, base_vhd_copy_path) # Replace the differential VHD with the merged one self._pathutils.rename(base_vhd_copy_path, diff_vhd_path) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(base_vhd_copy_path): self._pathutils.remove(base_vhd_copy_path) def _check_resize_vhd(self, vhd_path, vhd_info, new_size): curr_size = vhd_info['MaxInternalSize'] if new_size < curr_size: raise vmutils.VHDResizeException(_("Cannot resize a VHD " "to a smaller size")) elif new_size > curr_size: self._resize_vhd(vhd_path, new_size) def 
_resize_vhd(self, vhd_path, new_size): if vhd_path.split('.')[-1].lower() == "vhd": LOG.debug(_("Getting parent disk info for disk: %s"), vhd_path) base_disk_path = self._vhdutils.get_vhd_parent_path(vhd_path) if base_disk_path: # A differential VHD cannot be resized. This limitation # does not apply to the VHDX format. self._merge_base_vhd(vhd_path, base_disk_path) LOG.debug(_("Resizing disk \"%(vhd_path)s\" to new max " "size %(new_size)s"), {'vhd_path': vhd_path, 'new_size': new_size}) self._vhdutils.resize_vhd(vhd_path, new_size) def _check_base_disk(self, context, instance, diff_vhd_path, src_base_disk_path): base_vhd_path = self._imagecache.get_cached_image(context, instance) # If the location of the base host differs between source # and target hosts we need to reconnect the base disk if src_base_disk_path.lower() != base_vhd_path.lower(): LOG.debug(_("Reconnecting copied base VHD " "%(base_vhd_path)s and diff " "VHD %(diff_vhd_path)s"), {'base_vhd_path': base_vhd_path, 'diff_vhd_path': diff_vhd_path}) self._vhdutils.reconnect_parent_vhd(diff_vhd_path, base_vhd_path) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, block_device_info=None, power_on=True): LOG.debug(_("finish_migration called"), instance=instance) instance_name = instance['name'] if self._volumeops.ebs_root_in_block_devices(block_device_info): root_vhd_path = None else: root_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) if not root_vhd_path: raise vmutils.HyperVException(_("Cannot find boot VHD " "file for instance: %s") % instance_name) root_vhd_info = self._vhdutils.get_vhd_info(root_vhd_path) src_base_disk_path = root_vhd_info.get("ParentPath") if src_base_disk_path: self._check_base_disk(context, instance, root_vhd_path, src_base_disk_path) if resize_instance: new_size = instance['root_gb'] * units.Gi self._check_resize_vhd(root_vhd_path, root_vhd_info, new_size) eph_vhd_path = self._pathutils.lookup_ephemeral_vhd_path(instance_name) if resize_instance: new_size = instance.get('ephemeral_gb', 0) * units.Gi if not eph_vhd_path: if new_size: eph_vhd_path = self._vmops.create_ephemeral_vhd(instance) else: eph_vhd_info = self._vhdutils.get_vhd_info(eph_vhd_path) self._check_resize_vhd(eph_vhd_path, eph_vhd_info, new_size) self._vmops.create_instance(instance, network_info, block_device_info, root_vhd_path, eph_vhd_path) self._check_and_attach_config_drive(instance) if power_on: self._vmops.power_on(instance) nova-2014.1.5/nova/virt/hyperv/imagecache.py0000664000567000056700000001321112540642544021737 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Image caching and management. 
""" import os from oslo.config import cfg from nova.compute import flavors from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from nova import utils from nova.virt.hyperv import utilsfactory from nova.virt.hyperv import vmutils from nova.virt import images LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('use_cow_images', 'nova.virt.driver') class ImageCache(object): def __init__(self): self._pathutils = utilsfactory.get_pathutils() self._vhdutils = utilsfactory.get_vhdutils() def _get_root_vhd_size_gb(self, instance): try: # In case of resizes we need the old root disk size old_flavor = flavors.extract_flavor( instance, prefix='old_') return old_flavor['root_gb'] except KeyError: return instance['root_gb'] def _resize_and_cache_vhd(self, instance, vhd_path): vhd_info = self._vhdutils.get_vhd_info(vhd_path) vhd_size = vhd_info['MaxInternalSize'] root_vhd_size_gb = self._get_root_vhd_size_gb(instance) root_vhd_size = root_vhd_size_gb * units.Gi root_vhd_internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( vhd_path, root_vhd_size)) if root_vhd_internal_size < vhd_size: raise vmutils.HyperVException( _("Cannot resize the image to a size smaller than the VHD " "max. internal size: %(vhd_size)s. Requested disk size: " "%(root_vhd_size)s") % {'vhd_size': vhd_size, 'root_vhd_size': root_vhd_size} ) if root_vhd_internal_size > vhd_size: path_parts = os.path.splitext(vhd_path) resized_vhd_path = '%s_%s%s' % (path_parts[0], root_vhd_size_gb, path_parts[1]) @utils.synchronized(resized_vhd_path) def copy_and_resize_vhd(): if not self._pathutils.exists(resized_vhd_path): try: LOG.debug(_("Copying VHD %(vhd_path)s to " "%(resized_vhd_path)s"), {'vhd_path': vhd_path, 'resized_vhd_path': resized_vhd_path}) self._pathutils.copyfile(vhd_path, resized_vhd_path) LOG.debug(_("Resizing VHD %(resized_vhd_path)s to new " "size %(root_vhd_size)s"), {'resized_vhd_path': resized_vhd_path, 'root_vhd_size': root_vhd_size}) self._vhdutils.resize_vhd(resized_vhd_path, root_vhd_internal_size, is_file_max_size=False) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(resized_vhd_path): self._pathutils.remove(resized_vhd_path) copy_and_resize_vhd() return resized_vhd_path def get_cached_image(self, context, instance): image_id = instance['image_ref'] base_vhd_dir = self._pathutils.get_base_vhd_dir() base_vhd_path = os.path.join(base_vhd_dir, image_id) @utils.synchronized(base_vhd_path) def fetch_image_if_not_existing(): vhd_path = None for format_ext in ['vhd', 'vhdx']: test_path = base_vhd_path + '.' + format_ext if self._pathutils.exists(test_path): vhd_path = test_path break if not vhd_path: try: images.fetch(context, image_id, base_vhd_path, instance['user_id'], instance['project_id']) format_ext = self._vhdutils.get_vhd_format(base_vhd_path) vhd_path = base_vhd_path + '.' + format_ext.lower() self._pathutils.rename(base_vhd_path, vhd_path) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(base_vhd_path): self._pathutils.remove(base_vhd_path) return vhd_path vhd_path = fetch_image_if_not_existing() if CONF.use_cow_images and vhd_path.split('.')[-1].lower() == 'vhd': # Resize the base VHD image as it's not possible to resize a # differencing VHD. This does not apply to VHDX images. 
resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path) if resized_vhd_path: return resized_vhd_path return vhd_path nova-2014.1.5/nova/virt/hyperv/hostutils.py0000664000567000056700000000615512540642544021740 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ctypes import socket import sys if sys.platform == 'win32': import wmi class HostUtils(object): def __init__(self): if sys.platform == 'win32': self._conn_cimv2 = wmi.WMI(moniker='//./root/cimv2') def get_cpus_info(self): cpus = self._conn_cimv2.query("SELECT * FROM Win32_Processor " "WHERE ProcessorType = 3") cpus_list = [] for cpu in cpus: cpu_info = {'Architecture': cpu.Architecture, 'Name': cpu.Name, 'Manufacturer': cpu.Manufacturer, 'NumberOfCores': cpu.NumberOfCores, 'NumberOfLogicalProcessors': cpu.NumberOfLogicalProcessors} cpus_list.append(cpu_info) return cpus_list def is_cpu_feature_present(self, feature_key): return ctypes.windll.kernel32.IsProcessorFeaturePresent(feature_key) def get_memory_info(self): """Returns a tuple with total visible memory and free physical memory expressed in kB. """ mem_info = self._conn_cimv2.query("SELECT TotalVisibleMemorySize, " "FreePhysicalMemory " "FROM win32_operatingsystem")[0] return (long(mem_info.TotalVisibleMemorySize), long(mem_info.FreePhysicalMemory)) def get_volume_info(self, drive): """Returns a tuple with total size and free space expressed in bytes. """ logical_disk = self._conn_cimv2.query("SELECT Size, FreeSpace " "FROM win32_logicaldisk " "WHERE DeviceID='%s'" % drive)[0] return (long(logical_disk.Size), long(logical_disk.FreeSpace)) def check_min_windows_version(self, major, minor, build=0): version_str = self.get_windows_version() return map(int, version_str.split('.')) >= [major, minor, build] def get_windows_version(self): return self._conn_cimv2.Win32_OperatingSystem()[0].Version def get_local_ips(self): addr_info = socket.getaddrinfo(socket.gethostname(), None, 0, 0, 0) # Returns IPv4 and IPv6 addresses, ordered by protocol family addr_info.sort() return [a[4][0] for a in addr_info] def get_host_tick_count64(self): return ctypes.windll.kernel32.GetTickCount64() nova-2014.1.5/nova/virt/hyperv/snapshotops.py0000664000567000056700000001241712540642544022261 0ustar jenkinsjenkins00000000000000# Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for VM snapshot operations. 
""" import os from oslo.config import cfg from nova.compute import task_states from nova.image import glance from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.hyperv import utilsfactory CONF = cfg.CONF LOG = logging.getLogger(__name__) class SnapshotOps(object): def __init__(self): self._pathutils = utilsfactory.get_pathutils() self._vmutils = utilsfactory.get_vmutils() self._vhdutils = utilsfactory.get_vhdutils() def _save_glance_image(self, context, name, image_vhd_path): (glance_image_service, image_id) = glance.get_remote_image_service(context, name) image_metadata = {"is_public": False, "disk_format": "vhd", "container_format": "bare", "properties": {}} with self._pathutils.open(image_vhd_path, 'rb') as f: glance_image_service.update(context, image_id, image_metadata, f) def snapshot(self, context, instance, name, update_task_state): """Create snapshot from a running VM instance.""" instance_name = instance["name"] LOG.debug(_("Creating snapshot for instance %s"), instance_name) snapshot_path = self._vmutils.take_vm_snapshot(instance_name) update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) export_dir = None try: src_vhd_path = self._pathutils.lookup_root_vhd_path(instance_name) LOG.debug(_("Getting info for VHD %s"), src_vhd_path) src_base_disk_path = self._vhdutils.get_vhd_parent_path( src_vhd_path) export_dir = self._pathutils.get_export_dir(instance_name) dest_vhd_path = os.path.join(export_dir, os.path.basename( src_vhd_path)) LOG.debug(_('Copying VHD %(src_vhd_path)s to %(dest_vhd_path)s'), {'src_vhd_path': src_vhd_path, 'dest_vhd_path': dest_vhd_path}) self._pathutils.copyfile(src_vhd_path, dest_vhd_path) image_vhd_path = None if not src_base_disk_path: image_vhd_path = dest_vhd_path else: basename = os.path.basename(src_base_disk_path) dest_base_disk_path = os.path.join(export_dir, basename) LOG.debug(_('Copying base disk %(src_vhd_path)s to ' '%(dest_base_disk_path)s'), {'src_vhd_path': src_vhd_path, 'dest_base_disk_path': dest_base_disk_path}) self._pathutils.copyfile(src_base_disk_path, dest_base_disk_path) LOG.debug(_("Reconnecting copied base VHD " "%(dest_base_disk_path)s and diff " "VHD %(dest_vhd_path)s"), {'dest_base_disk_path': dest_base_disk_path, 'dest_vhd_path': dest_vhd_path}) self._vhdutils.reconnect_parent_vhd(dest_vhd_path, dest_base_disk_path) LOG.debug(_("Merging base disk %(dest_base_disk_path)s and " "diff disk %(dest_vhd_path)s"), {'dest_base_disk_path': dest_base_disk_path, 'dest_vhd_path': dest_vhd_path}) self._vhdutils.merge_vhd(dest_vhd_path, dest_base_disk_path) image_vhd_path = dest_base_disk_path LOG.debug(_("Updating Glance image %(name)s with content from " "merged disk %(image_vhd_path)s"), {'name': name, 'image_vhd_path': image_vhd_path}) update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD) self._save_glance_image(context, name, image_vhd_path) LOG.debug(_("Snapshot image %(name)s updated for VM " "%(instance_name)s"), {'name': name, 'instance_name': instance_name}) finally: try: LOG.debug(_("Removing snapshot %s"), name) self._vmutils.remove_vm_snapshot(snapshot_path) except Exception as ex: LOG.exception(ex) LOG.warning(_('Failed to remove snapshot for VM %s') % instance_name) if export_dir: LOG.debug(_('Removing directory: %s'), export_dir) self._pathutils.rmtree(export_dir) nova-2014.1.5/nova/virt/hyperv/hostops.py0000664000567000056700000001560712540642544021403 0ustar jenkinsjenkins00000000000000# Copyright 2012 
Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for host operations. """ import datetime import os import platform import time from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import units from nova.virt.hyperv import constants from nova.virt.hyperv import utilsfactory CONF = cfg.CONF CONF.import_opt('my_ip', 'nova.netconf') LOG = logging.getLogger(__name__) class HostOps(object): def __init__(self): self._stats = None self._hostutils = utilsfactory.get_hostutils() self._pathutils = utilsfactory.get_pathutils() def _get_cpu_info(self): """Get the CPU information. :returns: A dictionary containing the main properties of the central processor in the hypervisor. """ cpu_info = dict() processors = self._hostutils.get_cpus_info() w32_arch_dict = constants.WMI_WIN32_PROCESSOR_ARCHITECTURE cpu_info['arch'] = w32_arch_dict.get(processors[0]['Architecture'], 'Unknown') cpu_info['model'] = processors[0]['Name'] cpu_info['vendor'] = processors[0]['Manufacturer'] topology = dict() topology['sockets'] = len(processors) topology['cores'] = processors[0]['NumberOfCores'] topology['threads'] = (processors[0]['NumberOfLogicalProcessors'] / processors[0]['NumberOfCores']) cpu_info['topology'] = topology features = list() for fkey, fname in constants.PROCESSOR_FEATURE.items(): if self._hostutils.is_cpu_feature_present(fkey): features.append(fname) cpu_info['features'] = features return cpu_info def _get_memory_info(self): (total_mem_kb, free_mem_kb) = self._hostutils.get_memory_info() total_mem_mb = total_mem_kb / 1024 free_mem_mb = free_mem_kb / 1024 return (total_mem_mb, free_mem_mb, total_mem_mb - free_mem_mb) def _get_local_hdd_info_gb(self): drive = os.path.splitdrive(self._pathutils.get_instances_dir())[0] (size, free_space) = self._hostutils.get_volume_info(drive) total_gb = size / units.Gi free_gb = free_space / units.Gi used_gb = total_gb - free_gb return (total_gb, free_gb, used_gb) def _get_hypervisor_version(self): """Get hypervisor version. :returns: hypervisor version (ex. 12003) """ version = self._hostutils.get_windows_version().replace('.', '') LOG.debug(_('Windows version: %s ') % version) return version def get_available_resource(self): """Retrieve resource info. This method is called when nova-compute launches, and as part of a periodic task. 
:returns: dictionary describing resources """ LOG.debug(_('get_available_resource called')) (total_mem_mb, free_mem_mb, used_mem_mb) = self._get_memory_info() (total_hdd_gb, free_hdd_gb, used_hdd_gb) = self._get_local_hdd_info_gb() cpu_info = self._get_cpu_info() cpu_topology = cpu_info['topology'] vcpus = (cpu_topology['sockets'] * cpu_topology['cores'] * cpu_topology['threads']) dic = {'vcpus': vcpus, 'memory_mb': total_mem_mb, 'memory_mb_used': used_mem_mb, 'local_gb': total_hdd_gb, 'local_gb_used': used_hdd_gb, 'hypervisor_type': "hyperv", 'hypervisor_version': self._get_hypervisor_version(), 'hypervisor_hostname': platform.node(), 'vcpus_used': 0, 'cpu_info': jsonutils.dumps(cpu_info), 'supported_instances': jsonutils.dumps( [('i686', 'hyperv', 'hvm'), ('x86_64', 'hyperv', 'hvm')]) } return dic def _update_stats(self): LOG.debug(_("Updating host stats")) (total_mem_mb, free_mem_mb, used_mem_mb) = self._get_memory_info() (total_hdd_gb, free_hdd_gb, used_hdd_gb) = self._get_local_hdd_info_gb() data = {} data["disk_total"] = total_hdd_gb data["disk_used"] = used_hdd_gb data["disk_available"] = free_hdd_gb data["host_memory_total"] = total_mem_mb data["host_memory_overhead"] = used_mem_mb data["host_memory_free"] = free_mem_mb data["host_memory_free_computed"] = free_mem_mb data["supported_instances"] = [('i686', 'hyperv', 'hvm'), ('x86_64', 'hyperv', 'hvm')] data["hypervisor_hostname"] = platform.node() self._stats = data def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run the update first. """ LOG.debug(_("get_host_stats called")) if refresh or not self._stats: self._update_stats() return self._stats def host_power_action(self, host, action): """Reboots, shuts down or powers up the host.""" pass def get_host_ip_addr(self): host_ip = CONF.my_ip if not host_ip: # Return the first available address host_ip = self._hostutils.get_local_ips()[0] LOG.debug(_("Host IP address is: %s"), host_ip) return host_ip def get_host_uptime(self): """Returns the host uptime.""" tick_count64 = self._hostutils.get_host_tick_count64() # Format the string to match the libvirt driver's uptime output. # Libvirt uptime returns a combination of the following: # - current host time # - time since the host came up # - number of logged in users # - cpu load # Since the Windows function GetTickCount64 returns only # the time since the host came up, 0s are returned for the cpu load # and the number of logged in users. # This is done to ensure the format of the returned # value is the same as in libvirt return "%s up %s, 0 users, load average: 0, 0, 0" % ( str(time.strftime("%H:%M:%S")), str(datetime.timedelta(milliseconds=long(tick_count64)))) nova-2014.1.5/nova/virt/hyperv/driver.py0000664000567000056700000002345412540642544021176 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Cloud.com, Inc # Copyright (c) 2012 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A Hyper-V Nova Compute driver.
""" from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt import driver from nova.virt.hyperv import hostops from nova.virt.hyperv import livemigrationops from nova.virt.hyperv import migrationops from nova.virt.hyperv import rdpconsoleops from nova.virt.hyperv import snapshotops from nova.virt.hyperv import vmops from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) class HyperVDriver(driver.ComputeDriver): def __init__(self, virtapi): super(HyperVDriver, self).__init__(virtapi) self._hostops = hostops.HostOps() self._volumeops = volumeops.VolumeOps() self._vmops = vmops.VMOps() self._snapshotops = snapshotops.SnapshotOps() self._livemigrationops = livemigrationops.LiveMigrationOps() self._migrationops = migrationops.MigrationOps() self._rdpconsoleops = rdpconsoleops.RDPConsoleOps() def init_host(self, host): pass def list_instance_uuids(self): return self._vmops.list_instance_uuids() def list_instances(self): return self._vmops.list_instances() def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): self._vmops.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): self._vmops.reboot(instance, network_info, reboot_type) def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): self._vmops.destroy(instance, network_info, block_device_info, destroy_disks) def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True): """Cleanup after instance being destroyed by Hypervisor.""" pass def get_info(self, instance): return self._vmops.get_info(instance) def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): return self._volumeops.attach_volume(connection_info, instance['name']) def detach_volume(self, connection_info, instance, mountpoint, encryption=None): return self._volumeops.detach_volume(connection_info, instance['name']) def get_volume_connector(self, instance): return self._volumeops.get_volume_connector(instance) def get_available_resource(self, nodename): return self._hostops.get_available_resource() def get_host_stats(self, refresh=False): return self._hostops.get_host_stats(refresh) def host_power_action(self, host, action): return self._hostops.host_power_action(host, action) def snapshot(self, context, instance, name, update_task_state): self._snapshotops.snapshot(context, instance, name, update_task_state) def pause(self, instance): self._vmops.pause(instance) def unpause(self, instance): self._vmops.unpause(instance) def suspend(self, instance): self._vmops.suspend(instance) def resume(self, context, instance, network_info, block_device_info=None): self._vmops.resume(instance) def power_off(self, instance): self._vmops.power_off(instance) def power_on(self, context, instance, network_info, block_device_info=None): self._vmops.power_on(instance, block_device_info) def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """Resume guest state when a host is booted.""" self._vmops.resume_state_on_host_boot(context, instance, network_info, block_device_info) def live_migration(self, context, instance_ref, dest, post_method, recover_method, block_migration=False, migrate_data=None): 
self._livemigrationops.live_migration(context, instance_ref, dest, post_method, recover_method, block_migration, migrate_data) def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info): self.destroy(context, instance, network_info, block_device_info) def pre_live_migration(self, context, instance, block_device_info, network_info, disk, migrate_data=None): self._livemigrationops.pre_live_migration(context, instance, block_device_info, network_info) def post_live_migration_at_destination(self, ctxt, instance_ref, network_info, block_migr=False, block_device_info=None): self._livemigrationops.post_live_migration_at_destination(ctxt, instance_ref, network_info, block_migr) def check_can_live_migrate_destination(self, ctxt, instance_ref, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): return self._livemigrationops.check_can_live_migrate_destination( ctxt, instance_ref, src_compute_info, dst_compute_info, block_migration, disk_over_commit) def check_can_live_migrate_destination_cleanup(self, ctxt, dest_check_data): self._livemigrationops.check_can_live_migrate_destination_cleanup( ctxt, dest_check_data) def check_can_live_migrate_source(self, ctxt, instance_ref, dest_check_data, block_device_info=None): return self._livemigrationops.check_can_live_migrate_source( ctxt, instance_ref, dest_check_data) def get_instance_disk_info(self, instance_name, block_device_info=None): pass def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" msg = _("VIF plugging is not supported by the Hyper-V driver.") raise NotImplementedError(msg) def unplug_vifs(self, instance, network_info): """Unplug VIFs from networks.""" msg = _("VIF unplugging is not supported by the Hyper-V driver.") raise NotImplementedError(msg) def ensure_filtering_rules_for_instance(self, instance_ref, network_info): LOG.debug(_("ensure_filtering_rules_for_instance called"), instance=instance_ref) def unfilter_instance(self, instance, network_info): LOG.debug(_("unfilter_instance called"), instance=instance) def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None): return self._migrationops.migrate_disk_and_power_off(context, instance, dest, flavor, network_info, block_device_info) def confirm_migration(self, migration, instance, network_info): self._migrationops.confirm_migration(migration, instance, network_info) def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): self._migrationops.finish_revert_migration(context, instance, network_info, block_device_info, power_on) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, block_device_info=None, power_on=True): self._migrationops.finish_migration(context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info, power_on) def get_host_ip_addr(self): return self._hostops.get_host_ip_addr() def get_host_uptime(self, host): return self._hostops.get_host_uptime() def get_rdp_console(self, context, instance): return self._rdpconsoleops.get_rdp_console(instance) nova-2014.1.5/nova/virt/hyperv/README.rst0000664000567000056700000000265312540642532021013 0ustar jenkinsjenkins00000000000000Hyper-V Volumes Management ============================================= To enable the volume features, the first thing that needs to be done is to enable the iSCSI service on the Windows compute nodes and set it to 
start automatically. sc config msiscsi start= auto net start msiscsi In Windows Server 2012, it's important to execute the following commands to prevent volumes from being brought online by default: diskpart san policy=OfflineAll exit How to check if your iSCSI configuration is working properly: On your OpenStack controller: 1. Create a volume with e.g. "nova volume-create 1" and note the generated volume id On Windows: 2. iscsicli QAddTargetPortal 3. iscsicli ListTargets The output should contain the IQN related to your volume: iqn.2010-10.org.openstack:volume- How to test Boot from volume in Hyper-V from the OpenStack dashboard: 1. First of all, create a volume 2. Get the volume ID of the created volume 3. Upload the following VHD image to the cloud controller and decompress it: http://dev.opennebula.org/attachments/download/482/ttylinux.vhd.gz 4. sudo dd if=/path/to/vhdfileofstep3 of=/dev/nova-volumes/volume-XXXXX <- Related to the ID of step 2 5. Launch an instance from any image (this is not important because we are just booting from a volume) from the dashboard, and don't forget to select boot from volume and select the volume created in step 2. Important: Device name must be "vda". nova-2014.1.5/nova/virt/hyperv/constants.py0000664000567000056700000000364412540642544021712 0ustar jenkinsjenkins00000000000000# Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Constants used in ops classes """ from nova.compute import power_state HYPERV_VM_STATE_ENABLED = 2 HYPERV_VM_STATE_DISABLED = 3 HYPERV_VM_STATE_REBOOT = 10 HYPERV_VM_STATE_PAUSED = 32768 HYPERV_VM_STATE_SUSPENDED = 32769 HYPERV_POWER_STATE = { HYPERV_VM_STATE_DISABLED: power_state.SHUTDOWN, HYPERV_VM_STATE_ENABLED: power_state.RUNNING, HYPERV_VM_STATE_PAUSED: power_state.PAUSED, HYPERV_VM_STATE_SUSPENDED: power_state.SUSPENDED } WMI_WIN32_PROCESSOR_ARCHITECTURE = { 0: 'x86', 1: 'MIPS', 2: 'Alpha', 3: 'PowerPC', 5: 'ARM', 6: 'Itanium-based systems', 9: 'x64', } PROCESSOR_FEATURE = { 7: '3dnow', 3: 'mmx', 12: 'nx', 9: 'pae', 8: 'rdtsc', 20: 'slat', 13: 'sse3', 21: 'vmx', 6: 'sse', 10: 'sse2', 17: 'xsave', } WMI_JOB_STATUS_STARTED = 4096 WMI_JOB_STATE_RUNNING = 4 WMI_JOB_STATE_COMPLETED = 7 VM_SUMMARY_NUM_PROCS = 4 VM_SUMMARY_ENABLED_STATE = 100 VM_SUMMARY_MEMORY_USAGE = 103 VM_SUMMARY_UPTIME = 105 IDE_DISK = "VHD" IDE_DISK_FORMAT = IDE_DISK IDE_DVD = "DVD" IDE_DVD_FORMAT = "ISO" DISK_FORMAT_MAP = { IDE_DISK_FORMAT.lower(): IDE_DISK, IDE_DVD_FORMAT.lower(): IDE_DVD } DISK_FORMAT_VHD = "VHD" DISK_FORMAT_VHDX = "VHDX" VHD_TYPE_FIXED = 2 VHD_TYPE_DYNAMIC = 3 SCSI_CONTROLLER_SLOTS_NUMBER = 64 nova-2014.1.5/nova/virt/hyperv/livemigrationops.py0000664000567000056700000001141312540642544023266 0ustar jenkinsjenkins00000000000000# Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for live migration VM operations. """ import functools from oslo.config import cfg from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.hyperv import imagecache from nova.virt.hyperv import utilsfactory from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('use_cow_images', 'nova.virt.driver') def check_os_version_requirement(function): @functools.wraps(function) def wrapper(self, *args, **kwds): if not self._livemigrutils: raise NotImplementedError(_('Live migration is supported ' 'starting with Hyper-V Server ' '2012')) return function(self, *args, **kwds) return wrapper class LiveMigrationOps(object): def __init__(self): # Live migration is supported starting from Hyper-V Server 2012 if utilsfactory.get_hostutils().check_min_windows_version(6, 2): self._livemigrutils = utilsfactory.get_livemigrationutils() else: self._livemigrutils = None self._pathutils = utilsfactory.get_pathutils() self._volumeops = volumeops.VolumeOps() self._imagecache = imagecache.ImageCache() @check_os_version_requirement def live_migration(self, context, instance_ref, dest, post_method, recover_method, block_migration=False, migrate_data=None): LOG.debug(_("live_migration called"), instance=instance_ref) instance_name = instance_ref["name"] try: iscsi_targets = self._livemigrutils.live_migrate_vm(instance_name, dest) for (target_iqn, target_lun) in iscsi_targets: self._volumeops.logout_storage_target(target_iqn) except Exception: with excutils.save_and_reraise_exception(): LOG.debug(_("Calling live migration recover_method " "for instance: %s"), instance_name) recover_method(context, instance_ref, dest, block_migration) LOG.debug(_("Calling live migration post_method for instance: %s"), instance_name) post_method(context, instance_ref, dest, block_migration) @check_os_version_requirement def pre_live_migration(self, context, instance, block_device_info, network_info): LOG.debug(_("pre_live_migration called"), instance=instance) self._livemigrutils.check_live_migration_config() if CONF.use_cow_images: boot_from_volume = self._volumeops.ebs_root_in_block_devices( block_device_info) if not boot_from_volume: self._imagecache.get_cached_image(context, instance) self._volumeops.login_storage_targets(block_device_info) @check_os_version_requirement def post_live_migration_at_destination(self, ctxt, instance_ref, network_info, block_migration): LOG.debug(_("post_live_migration_at_destination called"), instance=instance_ref) @check_os_version_requirement def check_can_live_migrate_destination(self, ctxt, instance_ref, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): LOG.debug(_("check_can_live_migrate_destination called"), instance=instance_ref) return {} @check_os_version_requirement def check_can_live_migrate_destination_cleanup(self, ctxt, dest_check_data): LOG.debug(_("check_can_live_migrate_destination_cleanup called")) @check_os_version_requirement def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data): LOG.debug(_("check_can_live_migrate_source called"), instance=instance_ref) return dest_check_data nova-2014.1.5/nova/virt/hyperv/networkutilsv2.py0000664000567000056700000000446112540642544022722 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility class for network related operations. Based on the "root/virtualization/v2" namespace available starting with Hyper-V Server / Windows Server 2012. """ import sys if sys.platform == 'win32': import wmi from nova.openstack.common.gettextutils import _ from nova.virt.hyperv import networkutils from nova.virt.hyperv import vmutils class NetworkUtilsV2(networkutils.NetworkUtils): def __init__(self): if sys.platform == 'win32': self._conn = wmi.WMI(moniker='//./root/virtualization/v2') def get_external_vswitch(self, vswitch_name): if vswitch_name: vswitches = self._conn.Msvm_VirtualEthernetSwitch( ElementName=vswitch_name) if not len(vswitches): raise vmutils.HyperVException(_('vswitch "%s" not found') % vswitch_name) else: # Find the vswitch that is connected to the first physical nic. ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0] lep = ext_port.associators(wmi_result_class='Msvm_LANEndpoint')[0] lep1 = lep.associators(wmi_result_class='Msvm_LANEndpoint')[0] esw = lep1.associators( wmi_result_class='Msvm_EthernetSwitchPort')[0] vswitches = esw.associators( wmi_result_class='Msvm_VirtualEthernetSwitch') if not len(vswitches): raise vmutils.HyperVException(_('No external vswitch found')) return vswitches[0].path_() def create_vswitch_port(self, vswitch_path, port_name): raise NotImplementedError() def vswitch_port_needed(self): return False nova-2014.1.5/nova/virt/hyperv/vmops.py0000775000567000056700000004541012540642544021046 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Cloud.com, Inc # Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for basic VM operations.
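Spawn builds the instance disks (root and ephemeral VHDs, plus an optional config drive), creates the VM and its NICs, and powers it on; the remaining methods map Nova lifecycle calls (reboot, pause, suspend, power on/off, destroy) onto the Hyper-V VM states defined in constants.py.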
""" import functools import os from oslo.config import cfg from nova.api.metadata import base as instance_metadata from nova import exception from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common import units from nova.openstack.common import uuidutils from nova import utils from nova.virt import configdrive from nova.virt.hyperv import constants from nova.virt.hyperv import imagecache from nova.virt.hyperv import utilsfactory from nova.virt.hyperv import vmutils from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) hyperv_opts = [ cfg.BoolOpt('limit_cpu_features', default=False, help='Required for live migration among ' 'hosts with different CPU features'), cfg.BoolOpt('config_drive_inject_password', default=False, help='Sets the admin password in the config drive image'), cfg.StrOpt('qemu_img_cmd', default="qemu-img.exe", help='Path of qemu-img command which is used to convert ' 'between different image types'), cfg.BoolOpt('config_drive_cdrom', default=False, help='Attaches the Config Drive image as a cdrom drive ' 'instead of a disk drive'), cfg.BoolOpt('enable_instance_metrics_collection', default=False, help='Enables metrics collections for an instance by using ' 'Hyper-V\'s metric APIs. Collected data can by retrieved ' 'by other apps and services, e.g.: Ceilometer. ' 'Requires Hyper-V / Windows Server 2012 and above'), cfg.FloatOpt('dynamic_memory_ratio', default=1.0, help='Enables dynamic memory allocation (ballooning) when ' 'set to a value greater than 1. The value expresses ' 'the ratio between the total RAM assigned to an ' 'instance and its startup RAM amount. For example a ' 'ratio of 2.0 for an instance with 1024MB of RAM ' 'implies 512MB of RAM allocated at startup') ] CONF = cfg.CONF CONF.register_opts(hyperv_opts, 'hyperv') CONF.import_opt('use_cow_images', 'nova.virt.driver') CONF.import_opt('network_api_class', 'nova.network') def check_admin_permissions(function): @functools.wraps(function) def wrapper(self, *args, **kwds): # Make sure the windows account has the required admin permissions. 
self._vmutils.check_admin_permissions() return function(self, *args, **kwds) return wrapper class VMOps(object): _vif_driver_class_map = { 'nova.network.neutronv2.api.API': 'nova.virt.hyperv.vif.HyperVNeutronVIFDriver', 'nova.network.api.API': 'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver', } def __init__(self): self._vmutils = utilsfactory.get_vmutils() self._vhdutils = utilsfactory.get_vhdutils() self._pathutils = utilsfactory.get_pathutils() self._volumeops = volumeops.VolumeOps() self._imagecache = imagecache.ImageCache() self._vif_driver = None self._load_vif_driver_class() def _load_vif_driver_class(self): try: class_name = self._vif_driver_class_map[CONF.network_api_class] self._vif_driver = importutils.import_object(class_name) except KeyError: raise TypeError(_("VIF driver not found for " "network_api_class: %s") % CONF.network_api_class) def list_instance_uuids(self): instance_uuids = [] for (instance_name, notes) in self._vmutils.list_instance_notes(): if notes and uuidutils.is_uuid_like(notes[0]): instance_uuids.append(str(notes[0])) else: LOG.debug("Notes not found or not resembling a GUID for " "instance: %s" % instance_name) return instance_uuids def list_instances(self): return self._vmutils.list_instances() def get_info(self, instance): """Get information about the VM.""" LOG.debug(_("get_info called for instance"), instance=instance) instance_name = instance['name'] if not self._vmutils.vm_exists(instance_name): raise exception.InstanceNotFound(instance_id=instance['uuid']) info = self._vmutils.get_vm_summary_info(instance_name) state = constants.HYPERV_POWER_STATE[info['EnabledState']] return {'state': state, 'max_mem': info['MemoryUsage'], 'mem': info['MemoryUsage'], 'num_cpu': info['NumberOfProcessors'], 'cpu_time': info['UpTime']} def _create_root_vhd(self, context, instance): base_vhd_path = self._imagecache.get_cached_image(context, instance) format_ext = base_vhd_path.split('.')[-1] root_vhd_path = self._pathutils.get_root_vhd_path(instance['name'], format_ext) try: if CONF.use_cow_images: LOG.debug(_("Creating differencing VHD. 
Parent: " "%(base_vhd_path)s, Target: %(root_vhd_path)s"), {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) self._vhdutils.create_differencing_vhd(root_vhd_path, base_vhd_path) else: LOG.debug(_("Copying VHD image %(base_vhd_path)s to target: " "%(root_vhd_path)s"), {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) self._pathutils.copyfile(base_vhd_path, root_vhd_path) base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path) base_vhd_size = base_vhd_info['MaxInternalSize'] root_vhd_size = instance['root_gb'] * units.Gi root_vhd_internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( root_vhd_path, root_vhd_size)) if root_vhd_internal_size < base_vhd_size: error_msg = _("Cannot resize a VHD to a smaller size, the" " original size is %(base_vhd_size)s, the" " newer size is %(root_vhd_size)s" ) % {'base_vhd_size': base_vhd_size, 'root_vhd_size': root_vhd_internal_size} raise vmutils.HyperVException(error_msg) elif root_vhd_internal_size > base_vhd_size: LOG.debug(_("Resizing VHD %(root_vhd_path)s to new " "size %(root_vhd_size)s"), {'root_vhd_size': root_vhd_internal_size, 'root_vhd_path': root_vhd_path}, instance=instance) self._vhdutils.resize_vhd(root_vhd_path, root_vhd_internal_size, is_file_max_size=False) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(root_vhd_path): self._pathutils.remove(root_vhd_path) return root_vhd_path def create_ephemeral_vhd(self, instance): eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi if eph_vhd_size: vhd_format = self._vhdutils.get_best_supported_vhd_format() eph_vhd_path = self._pathutils.get_ephemeral_vhd_path( instance['name'], vhd_format) self._vhdutils.create_dynamic_vhd(eph_vhd_path, eph_vhd_size, vhd_format) return eph_vhd_path @check_admin_permissions def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None): """Create a new VM and start it.""" LOG.info(_("Spawning new instance"), instance=instance) instance_name = instance['name'] if self._vmutils.vm_exists(instance_name): raise exception.InstanceExists(name=instance_name) # Make sure we're starting with a clean slate. 
self._delete_disk_files(instance_name) if self._volumeops.ebs_root_in_block_devices(block_device_info): root_vhd_path = None else: root_vhd_path = self._create_root_vhd(context, instance) eph_vhd_path = self.create_ephemeral_vhd(instance) try: self.create_instance(instance, network_info, block_device_info, root_vhd_path, eph_vhd_path) if configdrive.required_by(instance): configdrive_path = self._create_config_drive(instance, injected_files, admin_password) self.attach_config_drive(instance, configdrive_path) self.power_on(instance) except Exception: with excutils.save_and_reraise_exception(): self.destroy(instance) def create_instance(self, instance, network_info, block_device_info, root_vhd_path, eph_vhd_path): instance_name = instance['name'] self._vmutils.create_vm(instance_name, instance['memory_mb'], instance['vcpus'], CONF.hyperv.limit_cpu_features, CONF.hyperv.dynamic_memory_ratio, [instance['uuid']]) ctrl_disk_addr = 0 if root_vhd_path: self._vmutils.attach_ide_drive(instance_name, root_vhd_path, 0, ctrl_disk_addr, constants.IDE_DISK) ctrl_disk_addr += 1 if eph_vhd_path: self._vmutils.attach_ide_drive(instance_name, eph_vhd_path, 0, ctrl_disk_addr, constants.IDE_DISK) self._vmutils.create_scsi_controller(instance_name) self._volumeops.attach_volumes(block_device_info, instance_name, root_vhd_path is None) for vif in network_info: LOG.debug(_('Creating nic for instance'), instance=instance) self._vmutils.create_nic(instance_name, vif['id'], vif['address']) self._vif_driver.plug(instance, vif) if CONF.hyperv.enable_instance_metrics_collection: self._vmutils.enable_vm_metrics_collection(instance_name) def _create_config_drive(self, instance, injected_files, admin_password): if CONF.config_drive_format != 'iso9660': raise vmutils.UnsupportedConfigDriveFormatException( _('Invalid config_drive_format "%s"') % CONF.config_drive_format) LOG.info(_('Using config drive for instance'), instance=instance) extra_md = {} if admin_password and CONF.hyperv.config_drive_inject_password: extra_md['admin_pass'] = admin_password inst_md = instance_metadata.InstanceMetadata(instance, content=injected_files, extra_md=extra_md) instance_path = self._pathutils.get_instance_dir( instance['name']) configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso') LOG.info(_('Creating config drive at %(path)s'), {'path': configdrive_path_iso}, instance=instance) with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: try: cdb.make_drive(configdrive_path_iso) except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception(): LOG.error(_('Creating config drive failed with error: %s'), e, instance=instance) if not CONF.hyperv.config_drive_cdrom: configdrive_path = os.path.join(instance_path, 'configdrive.vhd') utils.execute(CONF.hyperv.qemu_img_cmd, 'convert', '-f', 'raw', '-O', 'vpc', configdrive_path_iso, configdrive_path, attempts=1) self._pathutils.remove(configdrive_path_iso) else: configdrive_path = configdrive_path_iso return configdrive_path def attach_config_drive(self, instance, configdrive_path): configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):] # Do the attach here and if there is a certain file format that isn't # supported in constants.DISK_FORMAT_MAP then bomb out. 
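# Only the extensions listed in constants.DISK_FORMAT_MAP ('vhd' and # 'iso') are valid config drive formats; anything else raises # InvalidDiskFormat.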
try: self._vmutils.attach_ide_drive(instance.name, configdrive_path, 1, 0, constants.DISK_FORMAT_MAP[configdrive_ext]) except KeyError: raise exception.InvalidDiskFormat(disk_format=configdrive_ext) def _disconnect_volumes(self, volume_drives): for volume_drive in volume_drives: self._volumeops.disconnect_volume(volume_drive) def _delete_disk_files(self, instance_name): self._pathutils.get_instance_dir(instance_name, create_dir=False, remove_dir=True) def destroy(self, instance, network_info=None, block_device_info=None, destroy_disks=True): instance_name = instance['name'] LOG.info(_("Got request to destroy instance"), instance=instance) try: if self._vmutils.vm_exists(instance_name): #Stop the VM first. self.power_off(instance) storage = self._vmutils.get_vm_storage_paths(instance_name) (disk_files, volume_drives) = storage self._vmutils.destroy_vm(instance_name) self._disconnect_volumes(volume_drives) else: LOG.debug(_("Instance not found"), instance=instance) if destroy_disks: self._delete_disk_files(instance_name) except Exception: with excutils.save_and_reraise_exception(): LOG.exception(_('Failed to destroy instance: %s'), instance_name) def reboot(self, instance, network_info, reboot_type): """Reboot the specified instance.""" LOG.debug(_("Rebooting instance"), instance=instance) self._set_vm_state(instance['name'], constants.HYPERV_VM_STATE_REBOOT) def pause(self, instance): """Pause VM instance.""" LOG.debug(_("Pause instance"), instance=instance) self._set_vm_state(instance["name"], constants.HYPERV_VM_STATE_PAUSED) def unpause(self, instance): """Unpause paused VM instance.""" LOG.debug(_("Unpause instance"), instance=instance) self._set_vm_state(instance["name"], constants.HYPERV_VM_STATE_ENABLED) def suspend(self, instance): """Suspend the specified instance.""" LOG.debug(_("Suspend instance"), instance=instance) self._set_vm_state(instance["name"], constants.HYPERV_VM_STATE_SUSPENDED) def resume(self, instance): """Resume the suspended VM instance.""" LOG.debug(_("Resume instance"), instance=instance) self._set_vm_state(instance["name"], constants.HYPERV_VM_STATE_ENABLED) def power_off(self, instance): """Power off the specified instance.""" LOG.debug(_("Power off instance"), instance=instance) self._set_vm_state(instance["name"], constants.HYPERV_VM_STATE_DISABLED) def power_on(self, instance, block_device_info=None): """Power on the specified instance.""" LOG.debug(_("Power on instance"), instance=instance) if block_device_info: self._volumeops.fix_instance_volume_disk_paths(instance['name'], block_device_info) self._set_vm_state(instance['name'], constants.HYPERV_VM_STATE_ENABLED) def _set_vm_state(self, vm_name, req_state): try: self._vmutils.set_vm_state(vm_name, req_state) LOG.debug(_("Successfully changed state of VM %(vm_name)s" " to: %(req_state)s"), {'vm_name': vm_name, 'req_state': req_state}) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_("Failed to change vm state of %(vm_name)s" " to %(req_state)s"), {'vm_name': vm_name, 'req_state': req_state}) def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """Resume guest state when a host is booted.""" self.power_on(instance, block_device_info) nova-2014.1.5/nova/virt/hyperv/livemigrationutils.py0000664000567000056700000002620012540642544023625 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys if sys.platform == 'win32': import wmi from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.hyperv import vmutils from nova.virt.hyperv import vmutilsv2 from nova.virt.hyperv import volumeutilsv2 LOG = logging.getLogger(__name__) class LiveMigrationUtils(object): def __init__(self): self._vmutils = vmutilsv2.VMUtilsV2() self._volutils = volumeutilsv2.VolumeUtilsV2() def _get_conn_v2(self, host='localhost'): try: return wmi.WMI(moniker='//%s/root/virtualization/v2' % host) except wmi.x_wmi as ex: LOG.exception(ex) if ex.com_error.hresult == -2147217394: msg = (_('Live migration is not supported on target host "%s"') % host) elif ex.com_error.hresult == -2147023174: msg = (_('Target live migration host "%s" is unreachable') % host) else: msg = _('Live migration failed: %s') % ex.message raise vmutils.HyperVException(msg) def check_live_migration_config(self): conn_v2 = self._get_conn_v2() migration_svc = conn_v2.Msvm_VirtualSystemMigrationService()[0] vsmssds = migration_svc.associators( wmi_association_class='Msvm_ElementSettingData', wmi_result_class='Msvm_VirtualSystemMigrationServiceSettingData') vsmssd = vsmssds[0] if not vsmssd.EnableVirtualSystemMigration: raise vmutils.HyperVException( _('Live migration is not enabled on this host')) if not migration_svc.MigrationServiceListenerIPAddressList: raise vmutils.HyperVException( _('Live migration networks are not configured on this host')) def _get_vm(self, conn_v2, vm_name): vms = conn_v2.Msvm_ComputerSystem(ElementName=vm_name) n = len(vms) if not n: raise exception.NotFound(_('VM not found: %s') % vm_name) elif n > 1: raise vmutils.HyperVException(_('Duplicate VM name found: %s') % vm_name) return vms[0] def _destroy_planned_vm(self, conn_v2_remote, planned_vm): LOG.debug(_("Destroying existing remote planned VM: %s"), planned_vm.ElementName) vs_man_svc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0] (job_path, ret_val) = vs_man_svc.DestroySystem(planned_vm.path_()) self._vmutils.check_ret_val(ret_val, job_path) def _check_existing_planned_vm(self, conn_v2_remote, vm): # Make sure that there's not yet a remote planned VM on the target # host for this VM planned_vms = conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name) if planned_vms: self._destroy_planned_vm(conn_v2_remote, planned_vms[0]) def _create_remote_planned_vm(self, conn_v2_local, conn_v2_remote, vm, rmt_ip_addr_list, dest_host): # Staged vsmsd = conn_v2_local.query("select * from " "Msvm_VirtualSystemMigrationSettingData " "where MigrationType = 32770")[0] vsmsd.DestinationIPAddressList = rmt_ip_addr_list migration_setting_data = vsmsd.GetText_(1) LOG.debug(_("Creating remote planned VM for VM: %s"), vm.ElementName) migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0] (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost( ComputerSystem=vm.path_(), DestinationHost=dest_host, MigrationSettingData=migration_setting_data) 
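# MigrateVirtualSystemToHost is asynchronous: when ret_val is # WMI_JOB_STATUS_STARTED, check_ret_val polls the returned WMI job path # until the job completes.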
self._vmutils.check_ret_val(ret_val, job_path) return conn_v2_remote.Msvm_PlannedComputerSystem(Name=vm.Name)[0] def _get_physical_disk_paths(self, vm_name): ide_ctrl_path = self._vmutils.get_vm_ide_controller(vm_name, 0) ide_paths = self._vmutils.get_controller_volume_paths(ide_ctrl_path) scsi_ctrl_path = self._vmutils.get_vm_scsi_controller(vm_name) scsi_paths = self._vmutils.get_controller_volume_paths(scsi_ctrl_path) return dict(ide_paths.items() + scsi_paths.items()) def _get_remote_disk_data(self, vmutils_remote, disk_paths, dest_host): volutils_remote = volumeutilsv2.VolumeUtilsV2(dest_host) disk_paths_remote = {} iscsi_targets = [] for (rasd_rel_path, disk_path) in disk_paths.items(): (target_iqn, target_lun) = self._volutils.get_target_from_disk_path(disk_path) iscsi_targets.append((target_iqn, target_lun)) dev_num = volutils_remote.get_device_number_for_target(target_iqn, target_lun) disk_path_remote = vmutils_remote.get_mounted_disk_by_drive_number( dev_num) disk_paths_remote[rasd_rel_path] = disk_path_remote return (disk_paths_remote, iscsi_targets) def _update_planned_vm_disk_resources(self, vmutils_remote, conn_v2_remote, planned_vm, vm_name, disk_paths_remote): vm_settings = planned_vm.associators( wmi_association_class='Msvm_SettingsDefineState', wmi_result_class='Msvm_VirtualSystemSettingData')[0] updated_resource_setting_data = [] sasds = vm_settings.associators( wmi_association_class='Msvm_VirtualSystemSettingDataComponent') for sasd in sasds: if (sasd.ResourceType == 17 and sasd.ResourceSubType == "Microsoft:Hyper-V:Physical Disk Drive" and sasd.HostResource): # Replace the local disk target with the correct remote one old_disk_path = sasd.HostResource[0] new_disk_path = disk_paths_remote.pop(sasd.path().RelPath) LOG.debug(_("Replacing host resource " "%(old_disk_path)s with " "%(new_disk_path)s on planned VM %(vm_name)s"), {'old_disk_path': old_disk_path, 'new_disk_path': new_disk_path, 'vm_name': vm_name}) sasd.HostResource = [new_disk_path] updated_resource_setting_data.append(sasd.GetText_(1)) LOG.debug(_("Updating remote planned VM disk paths for VM: %s"), vm_name) vsmsvc = conn_v2_remote.Msvm_VirtualSystemManagementService()[0] (res_settings, job_path, ret_val) = vsmsvc.ModifyResourceSettings( ResourceSettings=updated_resource_setting_data) vmutils_remote.check_ret_val(ret_val, job_path) def _get_vhd_setting_data(self, vm): vm_settings = vm.associators( wmi_association_class='Msvm_SettingsDefineState', wmi_result_class='Msvm_VirtualSystemSettingData')[0] new_resource_setting_data = [] sasds = vm_settings.associators( wmi_association_class='Msvm_VirtualSystemSettingDataComponent', wmi_result_class='Msvm_StorageAllocationSettingData') for sasd in sasds: if (sasd.ResourceType == 31 and sasd.ResourceSubType == "Microsoft:Hyper-V:Virtual Hard Disk"): #sasd.PoolId = "" new_resource_setting_data.append(sasd.GetText_(1)) return new_resource_setting_data def _live_migrate_vm(self, conn_v2_local, vm, planned_vm, rmt_ip_addr_list, new_resource_setting_data, dest_host): # VirtualSystemAndStorage vsmsd = conn_v2_local.query("select * from " "Msvm_VirtualSystemMigrationSettingData " "where MigrationType = 32771")[0] vsmsd.DestinationIPAddressList = rmt_ip_addr_list if planned_vm: vsmsd.DestinationPlannedVirtualSystemId = planned_vm.Name migration_setting_data = vsmsd.GetText_(1) migr_svc = conn_v2_local.Msvm_VirtualSystemMigrationService()[0] LOG.debug(_("Starting live migration for VM: %s"), vm.ElementName) (job_path, ret_val) = migr_svc.MigrateVirtualSystemToHost( 
ComputerSystem=vm.path_(), DestinationHost=dest_host, MigrationSettingData=migration_setting_data, NewResourceSettingData=new_resource_setting_data) self._vmutils.check_ret_val(ret_val, job_path) def _get_remote_ip_address_list(self, conn_v2_remote, dest_host): LOG.debug(_("Getting live migration networks for remote host: %s"), dest_host) migr_svc_rmt = conn_v2_remote.Msvm_VirtualSystemMigrationService()[0] return migr_svc_rmt.MigrationServiceListenerIPAddressList def live_migrate_vm(self, vm_name, dest_host): self.check_live_migration_config() conn_v2_local = self._get_conn_v2() conn_v2_remote = self._get_conn_v2(dest_host) vm = self._get_vm(conn_v2_local, vm_name) self._check_existing_planned_vm(conn_v2_remote, vm) rmt_ip_addr_list = self._get_remote_ip_address_list(conn_v2_remote, dest_host) iscsi_targets = [] planned_vm = None disk_paths = self._get_physical_disk_paths(vm_name) if disk_paths: vmutils_remote = vmutilsv2.VMUtilsV2(dest_host) (disk_paths_remote, iscsi_targets) = self._get_remote_disk_data(vmutils_remote, disk_paths, dest_host) planned_vm = self._create_remote_planned_vm(conn_v2_local, conn_v2_remote, vm, rmt_ip_addr_list, dest_host) self._update_planned_vm_disk_resources(vmutils_remote, conn_v2_remote, planned_vm, vm_name, disk_paths_remote) new_resource_setting_data = self._get_vhd_setting_data(vm) self._live_migrate_vm(conn_v2_local, vm, planned_vm, rmt_ip_addr_list, new_resource_setting_data, dest_host) # In case the caller wants to log off the targets after migration return iscsi_targets nova-2014.1.5/nova/virt/hyperv/networkutils.py0000664000567000056700000000514012540642544022445 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility class for network related operations. """ import sys import uuid if sys.platform == 'win32': import wmi from nova.openstack.common.gettextutils import _ from nova.virt.hyperv import vmutils class NetworkUtils(object): def __init__(self): if sys.platform == 'win32': self._conn = wmi.WMI(moniker='//./root/virtualization') def get_external_vswitch(self, vswitch_name): if vswitch_name: vswitches = self._conn.Msvm_VirtualSwitch(ElementName=vswitch_name) else: # Find the vswitch that is connected to the first physical nic. ext_port = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0] port = ext_port.associators(wmi_result_class='Msvm_SwitchPort')[0] vswitches = port.associators(wmi_result_class='Msvm_VirtualSwitch') if not len(vswitches): if vswitch_name: raise vmutils.HyperVException(_('vswitch "%s" not found') % vswitch_name) else: raise vmutils.HyperVException(_('No external vswitch found')) return vswitches[0].path_() def create_vswitch_port(self, vswitch_path, port_name): switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] #Create a port on the vswitch.
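# The port's internal Name is a freshly generated UUID, while FriendlyName # keeps the caller-supplied port_name for readability.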
(new_port, ret_val) = switch_svc.CreateSwitchPort( Name=str(uuid.uuid4()), FriendlyName=port_name, ScopeOfResidence="", VirtualSwitch=vswitch_path) if ret_val != 0: raise vmutils.HyperVException(_("Failed to create vswitch port " "%(port_name)s on switch " "%(vswitch_path)s") % {'port_name': port_name, 'vswitch_path': vswitch_path}) return new_port def vswitch_port_needed(self): # NOTE(alexpilotti): In WMI V2 the vswitch_path is set in the VM # setting data without the need for a vswitch port. return True nova-2014.1.5/nova/virt/hyperv/__init__.py0000664000567000056700000000130712540642544021433 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.virt.hyperv import driver HyperVDriver = driver.HyperVDriver nova-2014.1.5/nova/virt/hyperv/pathutils.py0000775000567000056700000001435112540642544021717 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import shutil from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.hyperv import constants from oslo.config import cfg LOG = logging.getLogger(__name__) hyperv_opts = [ cfg.StrOpt('instances_path_share', default="", help='The name of a Windows share name mapped to the ' '"instances_path" dir and used by the resize feature ' 'to copy files to the target host. If left blank, an ' 'administrative share will be used, looking for the same ' '"instances_path" used locally'), ] CONF = cfg.CONF CONF.register_opts(hyperv_opts, 'hyperv') CONF.import_opt('instances_path', 'nova.compute.manager') class PathUtils(object): def open(self, path, mode): """Wrapper on __builtin__.open used to simplify unit testing.""" import __builtin__ return __builtin__.open(path, mode) def exists(self, path): return os.path.exists(path) def makedirs(self, path): os.makedirs(path) def remove(self, path): os.remove(path) def rename(self, src, dest): os.rename(src, dest) def copyfile(self, src, dest): self.copy(src, dest) def copy(self, src, dest): # With large files this is 2x-3x faster than shutil.copy(src, dest), # especially when copying to a UNC target. # shutil.copyfileobj(...) with a proper buffer is better than # shutil.copy(...) but still 20% slower than a shell copy. # It can be replaced with Win32 API calls to avoid the process # spawning overhead. 
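# cmd.exe /C runs the copy command and then exits; /Y suppresses the # confirmation prompt when overwriting an existing destination file.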
output, ret = utils.execute('cmd.exe', '/C', 'copy', '/Y', src, dest) if ret: raise IOError(_('The file copy from %(src)s to %(dest)s failed') % {'src': src, 'dest': dest}) def rmtree(self, path): shutil.rmtree(path) def get_instances_dir(self, remote_server=None): local_instance_path = os.path.normpath(CONF.instances_path) if remote_server: if CONF.hyperv.instances_path_share: path = CONF.hyperv.instances_path_share else: # Use an administrative share path = local_instance_path.replace(':', '$') return ('\\\\%(remote_server)s\\%(path)s' % {'remote_server': remote_server, 'path': path}) else: return local_instance_path def _check_create_dir(self, path): if not self.exists(path): LOG.debug(_('Creating directory: %s') % path) self.makedirs(path) def _check_remove_dir(self, path): if self.exists(path): LOG.debug(_('Removing directory: %s') % path) self.rmtree(path) def _get_instances_sub_dir(self, dir_name, remote_server=None, create_dir=True, remove_dir=False): instances_path = self.get_instances_dir(remote_server) path = os.path.join(instances_path, dir_name) if remove_dir: self._check_remove_dir(path) if create_dir: self._check_create_dir(path) return path def get_instance_migr_revert_dir(self, instance_name, create_dir=False, remove_dir=False): dir_name = '%s_revert' % instance_name return self._get_instances_sub_dir(dir_name, None, create_dir, remove_dir) def get_instance_dir(self, instance_name, remote_server=None, create_dir=True, remove_dir=False): return self._get_instances_sub_dir(instance_name, remote_server, create_dir, remove_dir) def _lookup_vhd_path(self, instance_name, vhd_path_func): vhd_path = None for format_ext in ['vhd', 'vhdx']: test_path = vhd_path_func(instance_name, format_ext) if self.exists(test_path): vhd_path = test_path break return vhd_path def lookup_root_vhd_path(self, instance_name): return self._lookup_vhd_path(instance_name, self.get_root_vhd_path) def lookup_configdrive_path(self, instance_name): configdrive_path = None for format_ext in constants.DISK_FORMAT_MAP: test_path = self.get_configdrive_path(instance_name, format_ext) if self.exists(test_path): configdrive_path = test_path break return configdrive_path def lookup_ephemeral_vhd_path(self, instance_name): return self._lookup_vhd_path(instance_name, self.get_ephemeral_vhd_path) def get_root_vhd_path(self, instance_name, format_ext): instance_path = self.get_instance_dir(instance_name) return os.path.join(instance_path, 'root.' + format_ext.lower()) def get_configdrive_path(self, instance_name, format_ext): instance_path = self.get_instance_dir(instance_name) return os.path.join(instance_path, 'configdrive.' + format_ext.lower()) def get_ephemeral_vhd_path(self, instance_name, format_ext): instance_path = self.get_instance_dir(instance_name) return os.path.join(instance_path, 'ephemeral.' + format_ext.lower()) def get_base_vhd_dir(self): return self._get_instances_sub_dir('_base') def get_export_dir(self, instance_name): dir_name = os.path.join('export', instance_name) return self._get_instances_sub_dir(dir_name, create_dir=True, remove_dir=True) nova-2014.1.5/nova/virt/hyperv/volumeops.py0000664000567000056700000002657312540642544021741 0ustar jenkinsjenkins00000000000000# Copyright 2012 Pedro Navarro Perez # Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for Storage-related functions (attach, detach, etc). """ import time from oslo.config import cfg from nova import exception from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt import driver from nova.virt.hyperv import constants from nova.virt.hyperv import utilsfactory from nova.virt.hyperv import vmutils LOG = logging.getLogger(__name__) hyper_volumeops_opts = [ cfg.IntOpt('volume_attach_retry_count', default=10, help='The number of times to retry to attach a volume'), cfg.IntOpt('volume_attach_retry_interval', default=5, help='Interval between volume attachment attempts, in seconds'), cfg.IntOpt('mounted_disk_query_retry_count', default=10, help='The number of times to retry checking for a disk mounted ' 'via iSCSI.'), cfg.IntOpt('mounted_disk_query_retry_interval', default=5, help='Interval between checks for a mounted iSCSI ' 'disk, in seconds.'), ] CONF = cfg.CONF CONF.register_opts(hyper_volumeops_opts, 'hyperv') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') class VolumeOps(object): """Management class for Volume-related tasks """ def __init__(self): self._hostutils = utilsfactory.get_hostutils() self._vmutils = utilsfactory.get_vmutils() self._volutils = utilsfactory.get_volumeutils() self._initiator = None self._default_root_device = 'vda' def ebs_root_in_block_devices(self, block_device_info): if block_device_info: root_device = block_device_info.get('root_device_name') if not root_device: root_device = self._default_root_device return self._volutils.volume_in_mapping(root_device, block_device_info) def attach_volumes(self, block_device_info, instance_name, ebs_root): mapping = driver.block_device_info_get_mapping(block_device_info) if ebs_root: self.attach_volume(mapping[0]['connection_info'], instance_name, True) mapping = mapping[1:] for vol in mapping: self.attach_volume(vol['connection_info'], instance_name) def login_storage_targets(self, block_device_info): mapping = driver.block_device_info_get_mapping(block_device_info) for vol in mapping: self._login_storage_target(vol['connection_info']) def _login_storage_target(self, connection_info): data = connection_info['data'] target_lun = data['target_lun'] target_iqn = data['target_iqn'] target_portal = data['target_portal'] # Check if we already logged in if self._volutils.get_device_number_for_target(target_iqn, target_lun): LOG.debug(_("Already logged in on storage target. No need to " "login. Portal: %(target_portal)s, " "IQN: %(target_iqn)s, LUN: %(target_lun)s"), {'target_portal': target_portal, 'target_iqn': target_iqn, 'target_lun': target_lun}) else: LOG.debug(_("Logging in on storage target. 
Portal: " "%(target_portal)s, IQN: %(target_iqn)s, " "LUN: %(target_lun)s"), {'target_portal': target_portal, 'target_iqn': target_iqn, 'target_lun': target_lun}) self._volutils.login_storage_target(target_lun, target_iqn, target_portal) # Wait for the target to be mounted self._get_mounted_disk_from_lun(target_iqn, target_lun, True) def attach_volume(self, connection_info, instance_name, ebs_root=False): """Attach a volume to the SCSI controller or to the IDE controller if ebs_root is True """ target_iqn = None LOG.debug(_("Attach_volume: %(connection_info)s to %(instance_name)s"), {'connection_info': connection_info, 'instance_name': instance_name}) try: self._login_storage_target(connection_info) data = connection_info['data'] target_lun = data['target_lun'] target_iqn = data['target_iqn'] #Getting the mounted disk mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, target_lun) if ebs_root: #Find the IDE controller for the vm. ctrller_path = self._vmutils.get_vm_ide_controller( instance_name, 0) #Attaching to the first slot slot = 0 else: #Find the SCSI controller for the vm ctrller_path = self._vmutils.get_vm_scsi_controller( instance_name) slot = self._get_free_controller_slot(ctrller_path) self._vmutils.attach_volume_to_controller(instance_name, ctrller_path, slot, mounted_disk_path) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_('Unable to attach volume to instance %s'), instance_name) if target_iqn: self._volutils.logout_storage_target(target_iqn) def _get_free_controller_slot(self, scsi_controller_path): attached_disks = self._vmutils.get_attached_disks(scsi_controller_path) used_slots = [int(disk.AddressOnParent) for disk in attached_disks] for slot in xrange(constants.SCSI_CONTROLLER_SLOTS_NUMBER): if slot not in used_slots: return slot raise vmutils.HyperVException("Exceeded the maximum number of slots") def detach_volumes(self, block_device_info, instance_name): mapping = driver.block_device_info_get_mapping(block_device_info) for vol in mapping: self.detach_volume(vol['connection_info'], instance_name) def logout_storage_target(self, target_iqn): LOG.debug(_("Logging off storage target %s"), target_iqn) self._volutils.logout_storage_target(target_iqn) def detach_volume(self, connection_info, instance_name): """Detach a volume to the SCSI controller.""" LOG.debug(_("Detach_volume: %(connection_info)s " "from %(instance_name)s"), {'connection_info': connection_info, 'instance_name': instance_name}) data = connection_info['data'] target_lun = data['target_lun'] target_iqn = data['target_iqn'] #Getting the mounted disk mounted_disk_path = self._get_mounted_disk_from_lun(target_iqn, target_lun) LOG.debug(_("Detaching physical disk from instance: %s"), mounted_disk_path) self._vmutils.detach_vm_disk(instance_name, mounted_disk_path) self.logout_storage_target(target_iqn) def get_volume_connector(self, instance): if not self._initiator: self._initiator = self._volutils.get_iscsi_initiator() if not self._initiator: LOG.warn(_('Could not determine iscsi initiator name'), instance=instance) return { 'ip': CONF.my_ip, 'host': CONF.host, 'initiator': self._initiator, } def _get_mounted_disk_from_lun(self, target_iqn, target_lun, wait_for_device=False): # The WMI query in get_device_number_for_target can incorrectly # return no data when the system is under load. This issue can # be avoided by adding a retry. 
for i in xrange(CONF.hyperv.mounted_disk_query_retry_count): device_number = self._volutils.get_device_number_for_target( target_iqn, target_lun) if device_number in (None, -1): attempt = i + 1 LOG.debug(_('Attempt %d to get device_number ' 'from get_device_number_for_target failed. ' 'Retrying...') % attempt) time.sleep(CONF.hyperv.mounted_disk_query_retry_interval) else: break if device_number in (None, -1): raise exception.NotFound(_('Unable to find a mounted disk for ' 'target_iqn: %s') % target_iqn) LOG.debug(_('Device number: %(device_number)s, ' 'target lun: %(target_lun)s'), {'device_number': device_number, 'target_lun': target_lun}) #Finding Mounted disk drive for i in range(0, CONF.hyperv.volume_attach_retry_count): mounted_disk_path = self._vmutils.get_mounted_disk_by_drive_number( device_number) if mounted_disk_path or not wait_for_device: break time.sleep(CONF.hyperv.volume_attach_retry_interval) if not mounted_disk_path: raise exception.NotFound(_('Unable to find a mounted disk ' 'for target_iqn: %s') % target_iqn) return mounted_disk_path def disconnect_volume(self, physical_drive_path): #Get the session_id of the ISCSI connection session_id = self._volutils.get_session_id_from_mounted_disk( physical_drive_path) #Logging out the target self._volutils.execute_log_out(session_id) def get_target_from_disk_path(self, physical_drive_path): return self._volutils.get_target_from_disk_path(physical_drive_path) def fix_instance_volume_disk_paths(self, instance_name, block_device_info): mapping = driver.block_device_info_get_mapping(block_device_info) if self.ebs_root_in_block_devices(block_device_info): mapping = mapping[1:] disk_address = 0 for vol in mapping: data = vol['connection_info']['data'] target_lun = data['target_lun'] target_iqn = data['target_iqn'] mounted_disk_path = self._get_mounted_disk_from_lun( target_iqn, target_lun, True) ctrller_path = self._vmutils.get_vm_scsi_controller(instance_name) self._vmutils.set_disk_host_resource( instance_name, ctrller_path, disk_address, mounted_disk_path) disk_address += 1 nova-2014.1.5/nova/virt/hyperv/vmutils.py0000664000567000056700000006553312540642544021412 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Cloud.com, Inc # Copyright 2012 Cloudbase Solutions Srl / Pedro Navarro Perez # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility class for VM related operations on Hyper-V. 
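The base class targets the legacy "root/virtualization" WMI namespace; hosts exposing the v2 namespace (Windows Server 2012 and later) are handled by subclasses such as VMUtilsV2, which can override the class-level constants below (see livemigrationutils.py for a usage example).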
""" import sys import time import uuid if sys.platform == 'win32': import wmi from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _, _LW from nova.openstack.common import log as logging from nova.virt.hyperv import constants CONF = cfg.CONF LOG = logging.getLogger(__name__) # TODO(alexpilotti): Move the exceptions to a separate module # TODO(alexpilotti): Add more domain exceptions class HyperVException(exception.NovaException): def __init__(self, message=None): super(HyperVException, self).__init__(message) # TODO(alexpilotti): Add a storage exception base class class VHDResizeException(HyperVException): def __init__(self, message=None): super(HyperVException, self).__init__(message) class HyperVAuthorizationException(HyperVException): def __init__(self, message=None): super(HyperVException, self).__init__(message) class UnsupportedConfigDriveFormatException(HyperVException): def __init__(self, message=None): super(HyperVException, self).__init__(message) class VMUtils(object): # These constants can be overridden by inherited classes _PHYS_DISK_RES_SUB_TYPE = 'Microsoft Physical Disk Drive' _DISK_RES_SUB_TYPE = 'Microsoft Synthetic Disk Drive' _DVD_RES_SUB_TYPE = 'Microsoft Synthetic DVD Drive' _IDE_DISK_RES_SUB_TYPE = 'Microsoft Virtual Hard Disk' _IDE_DVD_RES_SUB_TYPE = 'Microsoft Virtual CD/DVD Disk' _IDE_CTRL_RES_SUB_TYPE = 'Microsoft Emulated IDE Controller' _SCSI_CTRL_RES_SUB_TYPE = 'Microsoft Synthetic SCSI Controller' _SETTINGS_DEFINE_STATE_CLASS = 'Msvm_SettingsDefineState' _VIRTUAL_SYSTEM_SETTING_DATA_CLASS = 'Msvm_VirtualSystemSettingData' _RESOURCE_ALLOC_SETTING_DATA_CLASS = 'Msvm_ResourceAllocationSettingData' _PROCESSOR_SETTING_DATA_CLASS = 'Msvm_ProcessorSettingData' _MEMORY_SETTING_DATA_CLASS = 'Msvm_MemorySettingData' _STORAGE_ALLOC_SETTING_DATA_CLASS = _RESOURCE_ALLOC_SETTING_DATA_CLASS _SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS = \ 'Msvm_SyntheticEthernetPortSettingData' _AFFECTED_JOB_ELEMENT_CLASS = "Msvm_AffectedJobElement" _VIRTUAL_SYSTEM_CURRENT_SETTINGS = 3 _AUTOMATIC_STARTUP_ACTION_NONE = 0 _vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2, constants.HYPERV_VM_STATE_DISABLED: 3, constants.HYPERV_VM_STATE_REBOOT: 10, constants.HYPERV_VM_STATE_PAUSED: 32768, constants.HYPERV_VM_STATE_SUSPENDED: 32769} def __init__(self, host='.'): self._enabled_states_map = dict((v, k) for k, v in self._vm_power_states_map.iteritems()) if sys.platform == 'win32': self._init_hyperv_wmi_conn(host) self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host) def _init_hyperv_wmi_conn(self, host): self._conn = wmi.WMI(moniker='//%s/root/virtualization' % host) def list_instance_notes(self): instance_notes = [] for vs in self._conn.Msvm_VirtualSystemSettingData( ['ElementName', 'Notes'], SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS): instance_notes.append((vs.ElementName, [v for v in vs.Notes.split('\n') if v])) return instance_notes def list_instances(self): """Return the names of all the instances known to Hyper-V.""" return [v.ElementName for v in self._conn.Msvm_VirtualSystemSettingData( ['ElementName'], SettingType=self._VIRTUAL_SYSTEM_CURRENT_SETTINGS)] def get_vm_summary_info(self, vm_name): vm = self._lookup_vm_check(vm_name) vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] vmsettings = vm.associators( wmi_association_class=self._SETTINGS_DEFINE_STATE_CLASS, wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS) settings_paths = [v.path_() for v in vmsettings] #See 
http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx (ret_val, summary_info) = vs_man_svc.GetSummaryInformation( [constants.VM_SUMMARY_NUM_PROCS, constants.VM_SUMMARY_ENABLED_STATE, constants.VM_SUMMARY_MEMORY_USAGE, constants.VM_SUMMARY_UPTIME], settings_paths) if ret_val: raise HyperVException(_('Cannot get VM summary data for: %s') % vm_name) si = summary_info[0] memory_usage = None if si.MemoryUsage is not None: memory_usage = long(si.MemoryUsage) up_time = None if si.UpTime is not None: up_time = long(si.UpTime) # Nova requires a valid state to be returned. Hyper-V has more # states than Nova, typically intermediate ones and since there is # no direct mapping for those, ENABLED is the only reasonable option # considering that in all the non mappable states the instance # is running. enabled_state = self._enabled_states_map.get(si.EnabledState, constants.HYPERV_VM_STATE_ENABLED) summary_info_dict = {'NumberOfProcessors': si.NumberOfProcessors, 'EnabledState': enabled_state, 'MemoryUsage': memory_usage, 'UpTime': up_time} return summary_info_dict def _lookup_vm_check(self, vm_name): vm = self._lookup_vm(vm_name) if not vm: raise exception.NotFound(_('VM not found: %s') % vm_name) return vm def _lookup_vm(self, vm_name): vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) n = len(vms) if n == 0: return None elif n > 1: raise HyperVException(_('Duplicate VM name found: %s') % vm_name) else: return vms[0] def vm_exists(self, vm_name): return self._lookup_vm(vm_name) is not None def get_vm_id(self, vm_name): vm = self._lookup_vm_check(vm_name) return vm.Name def _get_vm_setting_data(self, vm): vmsettings = vm.associators( wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS) # Avoid snapshots return [s for s in vmsettings if s.SettingType == 3][0] def _set_vm_memory(self, vm, vmsetting, memory_mb, dynamic_memory_ratio): mem_settings = vmsetting.associators( wmi_result_class=self._MEMORY_SETTING_DATA_CLASS)[0] max_mem = long(memory_mb) mem_settings.Limit = max_mem if dynamic_memory_ratio > 1: mem_settings.DynamicMemoryEnabled = True # Must be a multiple of 2 reserved_mem = min( long(max_mem / dynamic_memory_ratio) >> 1 << 1, max_mem) else: mem_settings.DynamicMemoryEnabled = False reserved_mem = max_mem mem_settings.Reservation = reserved_mem # Start with the minimum memory mem_settings.VirtualQuantity = reserved_mem self._modify_virt_resource(mem_settings, vm.path_()) def _set_vm_vcpus(self, vm, vmsetting, vcpus_num, limit_cpu_features): procsetting = vmsetting.associators( wmi_result_class=self._PROCESSOR_SETTING_DATA_CLASS)[0] vcpus = long(vcpus_num) procsetting.VirtualQuantity = vcpus procsetting.Reservation = vcpus procsetting.Limit = 100000 # static assignment to 100% procsetting.LimitProcessorFeatures = limit_cpu_features self._modify_virt_resource(procsetting, vm.path_()) def update_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features, dynamic_memory_ratio): vm = self._lookup_vm_check(vm_name) vmsetting = self._get_vm_setting_data(vm) self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio) self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features) def check_admin_permissions(self): if not self._conn.Msvm_VirtualSystemManagementService(): msg = _("The Windows account running nova-compute on this Hyper-V" " host doesn't have the required permissions to create or" " operate the virtual machine.") raise HyperVAuthorizationException(msg) def create_vm(self, vm_name, memory_mb, vcpus_num, limit_cpu_features, dynamic_memory_ratio, notes=None): 
"""Creates a VM.""" vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] LOG.debug(_('Creating VM %s'), vm_name) vm = self._create_vm_obj(vs_man_svc, vm_name, notes) vmsetting = self._get_vm_setting_data(vm) LOG.debug(_('Setting memory for vm %s'), vm_name) self._set_vm_memory(vm, vmsetting, memory_mb, dynamic_memory_ratio) LOG.debug(_('Set vCPUs for vm %s'), vm_name) self._set_vm_vcpus(vm, vmsetting, vcpus_num, limit_cpu_features) def _create_vm_obj(self, vs_man_svc, vm_name, notes): vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() vs_gs_data.ElementName = vm_name # Don't start automatically on host boot vs_gs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE (vm_path, job_path, ret_val) = vs_man_svc.DefineVirtualSystem([], None, vs_gs_data.GetText_(1)) self.check_ret_val(ret_val, job_path) vm = self._get_wmi_obj(vm_path) if notes: vmsetting = self._get_vm_setting_data(vm) vmsetting.Notes = '\n'.join(notes) self._modify_virtual_system(vs_man_svc, vm_path, vmsetting) return self._get_wmi_obj(vm_path) def _modify_virtual_system(self, vs_man_svc, vm_path, vmsetting): (job_path, ret_val) = vs_man_svc.ModifyVirtualSystem( ComputerSystem=vm_path, SystemSettingData=vmsetting.GetText_(1))[1:] self.check_ret_val(ret_val, job_path) def get_vm_scsi_controller(self, vm_name): vm = self._lookup_vm_check(vm_name) vmsettings = vm.associators( wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS) rasds = vmsettings[0].associators( wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS) res = [r for r in rasds if r.ResourceSubType == self._SCSI_CTRL_RES_SUB_TYPE][0] return res.path_() def _get_vm_ide_controller(self, vm, ctrller_addr): vmsettings = vm.associators( wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS) rasds = vmsettings[0].associators( wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS) return [r for r in rasds if r.ResourceSubType == self._IDE_CTRL_RES_SUB_TYPE and r.Address == str(ctrller_addr)][0].path_() def get_vm_ide_controller(self, vm_name, ctrller_addr): vm = self._lookup_vm_check(vm_name) return self._get_vm_ide_controller(vm, ctrller_addr) def get_attached_disks(self, scsi_controller_path): volumes = self._conn.query("SELECT * FROM %(class_name)s " "WHERE ResourceSubType = " "'%(res_sub_type)s' AND " "Parent = '%(parent)s'" % {"class_name": self._RESOURCE_ALLOC_SETTING_DATA_CLASS, 'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE, 'parent': scsi_controller_path.replace("'", "''")}) return volumes def _get_new_setting_data(self, class_name): return self._conn.query("SELECT * FROM %s WHERE InstanceID " "LIKE '%%\\Default'" % class_name)[0] def _get_new_resource_setting_data(self, resource_sub_type, class_name=None): if class_name is None: class_name = self._RESOURCE_ALLOC_SETTING_DATA_CLASS return self._conn.query("SELECT * FROM %(class_name)s " "WHERE ResourceSubType = " "'%(res_sub_type)s' AND " "InstanceID LIKE '%%\\Default'" % {"class_name": class_name, "res_sub_type": resource_sub_type})[0] def attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr, drive_type=constants.IDE_DISK): """Create an IDE drive and attach it to the vm.""" vm = self._lookup_vm_check(vm_name) ctrller_path = self._get_vm_ide_controller(vm, ctrller_addr) if drive_type == constants.IDE_DISK: res_sub_type = self._DISK_RES_SUB_TYPE elif drive_type == constants.IDE_DVD: res_sub_type = self._DVD_RES_SUB_TYPE drive = self._get_new_resource_setting_data(res_sub_type) #Set the IDE ctrller as parent. 
drive.Parent = ctrller_path drive.Address = drive_addr #Add the cloned disk drive object to the vm. new_resources = self._add_virt_resource(drive, vm.path_()) drive_path = new_resources[0] if drive_type == constants.IDE_DISK: res_sub_type = self._IDE_DISK_RES_SUB_TYPE elif drive_type == constants.IDE_DVD: res_sub_type = self._IDE_DVD_RES_SUB_TYPE res = self._get_new_resource_setting_data(res_sub_type) #Set the new drive as the parent. res.Parent = drive_path res.Connection = [path] #Add the new vhd object as a virtual hard disk to the vm. self._add_virt_resource(res, vm.path_()) def create_scsi_controller(self, vm_name): """Create a SCSI controller ready to mount volumes.""" vm = self._lookup_vm_check(vm_name) scsicontrl = self._get_new_resource_setting_data( self._SCSI_CTRL_RES_SUB_TYPE) scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] self._add_virt_resource(scsicontrl, vm.path_()) def attach_volume_to_controller(self, vm_name, controller_path, address, mounted_disk_path): """Attach a volume to a controller.""" vm = self._lookup_vm_check(vm_name) diskdrive = self._get_new_resource_setting_data( self._PHYS_DISK_RES_SUB_TYPE) diskdrive.Address = address diskdrive.Parent = controller_path diskdrive.HostResource = [mounted_disk_path] self._add_virt_resource(diskdrive, vm.path_()) def _get_disk_resource_address(self, disk_resource): return disk_resource.Address def set_disk_host_resource(self, vm_name, controller_path, address, mounted_disk_path): disk_found = False vm = self._lookup_vm_check(vm_name) (disk_resources, volume_resources) = self._get_vm_disks(vm) for disk_resource in disk_resources + volume_resources: if (disk_resource.Parent == controller_path and self._get_disk_resource_address(disk_resource) == str(address)): if (disk_resource.HostResource and disk_resource.HostResource[0] != mounted_disk_path): LOG.debug('Updating disk host resource "%(old)s" to ' '"%(new)s"' % {'old': disk_resource.HostResource[0], 'new': mounted_disk_path}) disk_resource.HostResource = [mounted_disk_path] self._modify_virt_resource(disk_resource, vm.path_()) disk_found = True break if not disk_found: LOG.warn(_LW('Disk not found on controller "%(controller_path)s" ' 'with address "%(address)s"'), {'controller_path': controller_path, 'address': address}) def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data): nic_data = self._get_nic_data_by_name(nic_name) nic_data.Connection = [vswitch_conn_data] vm = self._lookup_vm_check(vm_name) self._modify_virt_resource(nic_data, vm.path_()) def _get_nic_data_by_name(self, name): return self._conn.Msvm_SyntheticEthernetPortSettingData( ElementName=name)[0] def create_nic(self, vm_name, nic_name, mac_address): """Create a (synthetic) nic and attach it to the vm.""" #Create a new nic new_nic_data = self._get_new_setting_data( self._SYNTHETIC_ETHERNET_PORT_SETTING_DATA_CLASS) #Configure the nic new_nic_data.ElementName = nic_name new_nic_data.Address = mac_address.replace(':', '') new_nic_data.StaticMacAddress = 'True' new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] #Add the new nic to the vm vm = self._lookup_vm_check(vm_name) self._add_virt_resource(new_nic_data, vm.path_()) def set_vm_state(self, vm_name, req_state): """Set the desired state of the VM.""" vm = self._lookup_vm_check(vm_name) (job_path, ret_val) = vm.RequestStateChange(self._vm_power_states_map[req_state]) #Invalid state for current operation (32775) typically means that #the VM is already in the state requested self.check_ret_val(ret_val,
job_path, [0, 32775]) LOG.debug(_("Successfully changed vm state of %(vm_name)s " "to %(req_state)s"), {'vm_name': vm_name, 'req_state': req_state}) def _get_disk_resource_disk_path(self, disk_resource): return disk_resource.Connection def get_vm_storage_paths(self, vm_name): vm = self._lookup_vm_check(vm_name) (disk_resources, volume_resources) = self._get_vm_disks(vm) volume_drives = [] for volume_resource in volume_resources: drive_path = volume_resource.HostResource[0] volume_drives.append(drive_path) disk_files = [] for disk_resource in disk_resources: disk_files.extend( [c for c in self._get_disk_resource_disk_path(disk_resource)]) return (disk_files, volume_drives) def _get_vm_disks(self, vm): vmsettings = vm.associators( wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS) rasds = vmsettings[0].associators( wmi_result_class=self._STORAGE_ALLOC_SETTING_DATA_CLASS) disk_resources = [r for r in rasds if r.ResourceSubType in [self._IDE_DISK_RES_SUB_TYPE, self._IDE_DVD_RES_SUB_TYPE]] if (self._RESOURCE_ALLOC_SETTING_DATA_CLASS != self._STORAGE_ALLOC_SETTING_DATA_CLASS): rasds = vmsettings[0].associators( wmi_result_class=self._RESOURCE_ALLOC_SETTING_DATA_CLASS) volume_resources = [r for r in rasds if r.ResourceSubType == self._PHYS_DISK_RES_SUB_TYPE] return (disk_resources, volume_resources) def destroy_vm(self, vm_name): vm = self._lookup_vm_check(vm_name) vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] #Remove the VM. Does not destroy disks. (job_path, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) self.check_ret_val(ret_val, job_path) def check_ret_val(self, ret_val, job_path, success_values=[0]): if ret_val == constants.WMI_JOB_STATUS_STARTED: return self._wait_for_job(job_path) elif ret_val not in success_values: raise HyperVException(_('Operation failed with return value: %s') % ret_val) def _wait_for_job(self, job_path): """Poll WMI job state and wait for completion.""" job = self._get_wmi_obj(job_path) while job.JobState == constants.WMI_JOB_STATE_RUNNING: time.sleep(0.1) job = self._get_wmi_obj(job_path) if job.JobState != constants.WMI_JOB_STATE_COMPLETED: job_state = job.JobState if job.path().Class == "Msvm_ConcreteJob": err_sum_desc = job.ErrorSummaryDescription err_desc = job.ErrorDescription err_code = job.ErrorCode raise HyperVException(_("WMI job failed with status " "%(job_state)d. Error details: " "%(err_sum_desc)s - %(err_desc)s - " "Error code: %(err_code)d") % {'job_state': job_state, 'err_sum_desc': err_sum_desc, 'err_desc': err_desc, 'err_code': err_code}) else: (error, ret_val) = job.GetError() if not ret_val and error: raise HyperVException(_("WMI job failed with status " "%(job_state)d. Error details: " "%(error)s") % {'job_state': job_state, 'error': error}) else: raise HyperVException(_("WMI job failed with status " "%d. 
No error " "description available") % job_state) desc = job.Description elap = job.ElapsedTime LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s"), {'desc': desc, 'elap': elap}) return job def _get_wmi_obj(self, path): return wmi.WMI(moniker=path.replace('\\', '/')) def _add_virt_resource(self, res_setting_data, vm_path): """Adds a new resource to the VM.""" vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] res_xml = [res_setting_data.GetText_(1)] (job_path, new_resources, ret_val) = vs_man_svc.AddVirtualSystemResources(res_xml, vm_path) self.check_ret_val(ret_val, job_path) return new_resources def _modify_virt_resource(self, res_setting_data, vm_path): """Updates a VM resource.""" vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] (job_path, ret_val) = vs_man_svc.ModifyVirtualSystemResources( ResourceSettingData=[res_setting_data.GetText_(1)], ComputerSystem=vm_path) self.check_ret_val(ret_val, job_path) def _remove_virt_resource(self, res_setting_data, vm_path): """Removes a VM resource.""" vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] res_path = [res_setting_data.path_()] (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemResources(res_path, vm_path) self.check_ret_val(ret_val, job_path) def take_vm_snapshot(self, vm_name): vm = self._lookup_vm_check(vm_name) vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] (job_path, ret_val, snp_setting_data) = vs_man_svc.CreateVirtualSystemSnapshot(vm.path_()) self.check_ret_val(ret_val, job_path) job_wmi_path = job_path.replace('\\', '/') job = wmi.WMI(moniker=job_wmi_path) snp_setting_data = job.associators( wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0] return snp_setting_data.path_() def remove_vm_snapshot(self, snapshot_path): vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] (job_path, ret_val) = vs_man_svc.RemoveVirtualSystemSnapshot( snapshot_path) self.check_ret_val(ret_val, job_path) def detach_vm_disk(self, vm_name, disk_path): vm = self._lookup_vm_check(vm_name) physical_disk = self._get_mounted_disk_resource_from_path(disk_path) if physical_disk: self._remove_virt_resource(physical_disk, vm.path_()) def _get_mounted_disk_resource_from_path(self, disk_path): physical_disks = self._conn.query("SELECT * FROM %(class_name)s " "WHERE ResourceSubType = '%(res_sub_type)s'" % {"class_name": self._RESOURCE_ALLOC_SETTING_DATA_CLASS, 'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE}) for physical_disk in physical_disks: if physical_disk.HostResource: if physical_disk.HostResource[0].lower() == disk_path.lower(): return physical_disk def get_mounted_disk_by_drive_number(self, device_number): mounted_disks = self._conn.query("SELECT * FROM Msvm_DiskDrive " "WHERE DriveNumber=" + str(device_number)) if len(mounted_disks): return mounted_disks[0].path_() def get_controller_volume_paths(self, controller_path): disks = self._conn.query("SELECT * FROM %(class_name)s " "WHERE ResourceSubType = '%(res_sub_type)s' " "AND Parent='%(parent)s'" % {"class_name": self._RESOURCE_ALLOC_SETTING_DATA_CLASS, "res_sub_type": self._PHYS_DISK_RES_SUB_TYPE, "parent": controller_path}) disk_data = {} for disk in disks: if disk.HostResource: disk_data[disk.path().RelPath] = disk.HostResource[0] return disk_data def enable_vm_metrics_collection(self, vm_name): raise NotImplementedError(_("Metrics collection is not supported on " "this version of Hyper-V")) nova-2014.1.5/nova/virt/hyperv/rdpconsoleutilsv2.py0000664000567000056700000000210012540642532023362 0ustar 
jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from nova.virt.hyperv import rdpconsoleutils if sys.platform == 'win32': import wmi class RDPConsoleUtilsV2(rdpconsoleutils.RDPConsoleUtils): def __init__(self): if sys.platform == 'win32': self._conn = wmi.WMI(moniker='//./root/virtualization/v2') def get_rdp_console_port(self): rdp_setting_data = self._conn.Msvm_TerminalServiceSettingData()[0] return rdp_setting_data.ListenerPort nova-2014.1.5/nova/virt/hyperv/volumeutils.py0000664000567000056700000001125012540642544022262 0ustar jenkinsjenkins00000000000000# Copyright 2012 Pedro Navarro Perez # Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helper methods for operations related to the management of volumes, and storage repositories Official Microsoft iSCSI Initiator and iSCSI command line interface documentation can be retrieved at: http://www.microsoft.com/en-us/download/details.aspx?id=34750 """ import re import time from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.hyperv import basevolumeutils from nova.virt.hyperv import vmutils LOG = logging.getLogger(__name__) CONF = cfg.CONF class VolumeUtils(basevolumeutils.BaseVolumeUtils): def __init__(self): super(VolumeUtils, self).__init__() def execute(self, *args, **kwargs): stdout_value, stderr_value = utils.execute(*args, **kwargs) if stdout_value.find('The operation completed successfully') == -1: raise vmutils.HyperVException(_('An error has occurred when ' 'calling the iscsi initiator: %s') % stdout_value) return stdout_value def _login_target_portal(self, target_portal): (target_address, target_port) = utils.parse_server_string(target_portal) output = self.execute('iscsicli.exe', 'ListTargetPortals') pattern = r'Address and Socket *: (.*)' portals = [addr.split() for addr in re.findall(pattern, output)] LOG.debug("Ensuring connection to portal: %s" % target_portal) if [target_address, str(target_port)] in portals: self.execute('iscsicli.exe', 'RefreshTargetPortal', target_address, target_port) else: #Adding target portal to iscsi initiator. 
Sending targets self.execute('iscsicli.exe', 'AddTargetPortal', target_address, target_port, '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*', '*') def login_storage_target(self, target_lun, target_iqn, target_portal): """Ensure that the target is logged in.""" self._login_target_portal(target_portal) #Listing targets self.execute('iscsicli.exe', 'ListTargets') retry_count = CONF.hyperv.volume_attach_retry_count # If the target is not connected, at least two iterations are needed: # one for performing the login and another one for checking if the # target was logged in successfully. if retry_count < 2: retry_count = 2 for attempt in xrange(retry_count): try: session_info = self.execute('iscsicli.exe', 'SessionList') if session_info.find(target_iqn) == -1: # Sending login self.execute('iscsicli.exe', 'qlogintarget', target_iqn) else: return except vmutils.HyperVException as exc: LOG.debug(_("Attempt %(attempt)d to connect to target " "%(target_iqn)s failed. Retrying. " "Exception: %(exc)s ") % {'target_iqn': target_iqn, 'exc': exc, 'attempt': attempt}) time.sleep(CONF.hyperv.volume_attach_retry_interval) raise vmutils.HyperVException(_('Failed to login target %s') % target_iqn) def logout_storage_target(self, target_iqn): """Logs out storage target through its session id.""" sessions = self._conn_wmi.query("SELECT * FROM " "MSiSCSIInitiator_SessionClass " "WHERE TargetName='%s'" % target_iqn) for session in sessions: self.execute_log_out(session.SessionId) def execute_log_out(self, session_id): """Executes log out of the session described by its session ID.""" self.execute('iscsicli.exe', 'logouttarget', session_id) nova-2014.1.5/nova/virt/hyperv/rdpconsoleutils.py0000664000567000056700000000142512540642532023123 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class RDPConsoleUtils(object): _DEFAULT_HYPERV_RDP_PORT = 2179 def get_rdp_console_port(self): return self._DEFAULT_HYPERV_RDP_PORT nova-2014.1.5/nova/virt/hyperv/basevolumeutils.py0000664000567000056700000001312412540642544023117 0ustar jenkinsjenkins00000000000000# # Copyright 2012 Pedro Navarro Perez # Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
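# The initiator-name lookup in get_iscsi_initiator() below reads the
# DefaultInitiatorName value from the Windows registry. A quick way to
# inspect the same value by hand on the host (illustrative command,
# shown only as a sketch):
#
#   reg query "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\iSCSI\Discovery" /v DefaultInitiatorName
#
# If the value is missing, the code falls back to the conventional
# "iqn.1991-05.com.microsoft:<hostname>" form.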
""" Helper methods for operations related to the management of volumes, and storage repositories """ import abc import sys if sys.platform == 'win32': import _winreg import wmi from nova import block_device from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt import driver LOG = logging.getLogger(__name__) class BaseVolumeUtils(object): def __init__(self, host='.'): if sys.platform == 'win32': self._conn_wmi = wmi.WMI(moniker='//%s/root/wmi' % host) self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host) @abc.abstractmethod def login_storage_target(self, target_lun, target_iqn, target_portal): pass @abc.abstractmethod def logout_storage_target(self, target_iqn): pass @abc.abstractmethod def execute_log_out(self, session_id): pass def get_iscsi_initiator(self): """Get iscsi initiator name for this machine.""" computer_system = self._conn_cimv2.Win32_ComputerSystem()[0] hostname = computer_system.name keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\" "iSCSI\\Discovery") try: key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0, _winreg.KEY_ALL_ACCESS) temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName') initiator_name = str(temp[0]) _winreg.CloseKey(key) except Exception: LOG.info(_("The ISCSI initiator name can't be found. " "Choosing the default one")) initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower() if computer_system.PartofDomain: initiator_name += '.' + computer_system.Domain.lower() return initiator_name def volume_in_mapping(self, mount_device, block_device_info): block_device_list = [block_device.strip_dev(vol['mount_device']) for vol in driver.block_device_info_get_mapping( block_device_info)] swap = driver.block_device_info_get_swap(block_device_info) if driver.swap_is_usable(swap): block_device_list.append( block_device.strip_dev(swap['device_name'])) block_device_list += [block_device.strip_dev( ephemeral['device_name']) for ephemeral in driver.block_device_info_get_ephemerals(block_device_info)] LOG.debug(_("block_device_list %s"), block_device_list) return block_device.strip_dev(mount_device) in block_device_list def _get_drive_number_from_disk_path(self, disk_path): # TODO(pnavarro) replace with regex start_device_id = disk_path.find('"', disk_path.find('DeviceID')) end_device_id = disk_path.find('"', start_device_id + 1) device_id = disk_path[start_device_id + 1:end_device_id] drive_number = device_id[device_id.find("\\") + 2:] if drive_number == 'NODRIVE': return None return int(drive_number) def get_session_id_from_mounted_disk(self, physical_drive_path): drive_number = self._get_drive_number_from_disk_path( physical_drive_path) if not drive_number: return None initiator_sessions = self._conn_wmi.query("SELECT * FROM " "MSiSCSIInitiator_Session" "Class") for initiator_session in initiator_sessions: devices = initiator_session.Devices for device in devices: device_number = device.DeviceNumber if device_number == drive_number: return initiator_session.SessionId def get_device_number_for_target(self, target_iqn, target_lun): initiator_sessions = self._conn_wmi.query("SELECT * FROM " "MSiSCSIInitiator_Session" "Class WHERE TargetName='%s'" % target_iqn) if not initiator_sessions: return None devices = initiator_sessions[0].Devices if not devices: return None for device in devices: if device.ScsiLun == target_lun: return device.DeviceNumber def get_target_from_disk_path(self, disk_path): initiator_sessions = self._conn_wmi.MSiSCSIInitiator_SessionClass() drive_number = 
self._get_drive_number_from_disk_path(disk_path) if drive_number is None: return None for initiator_session in initiator_sessions: devices = initiator_session.Devices for device in devices: if device.DeviceNumber == drive_number: return (device.TargetName, device.ScsiLun) nova-2014.1.5/nova/virt/hyperv/vhdutils.py0000664000567000056700000001725212540642544021544 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility class for VHD related operations. Official VHD format specs can be retrieved at: http://technet.microsoft.com/en-us/library/bb676673.aspx See "Download the Specifications Without Registering" Official VHDX format specs can be retrieved at: http://www.microsoft.com/en-us/download/details.aspx?id=34750 """ import struct import sys if sys.platform == 'win32': import wmi from nova.openstack.common.gettextutils import _ from nova.virt.hyperv import constants from nova.virt.hyperv import vmutils from xml.etree import ElementTree VHD_HEADER_SIZE_FIX = 512 VHD_BAT_ENTRY_SIZE = 4 VHD_DYNAMIC_DISK_HEADER_SIZE = 1024 VHD_HEADER_SIZE_DYNAMIC = 512 VHD_FOOTER_SIZE_DYNAMIC = 512 VHD_BLK_SIZE_OFFSET = 544 VHD_SIGNATURE = 'conectix' VHDX_SIGNATURE = 'vhdxfile' class VHDUtils(object): def __init__(self): self._vmutils = vmutils.VMUtils() if sys.platform == 'win32': self._conn = wmi.WMI(moniker='//./root/virtualization') def validate_vhd(self, vhd_path): image_man_svc = self._conn.Msvm_ImageManagementService()[0] (job_path, ret_val) = image_man_svc.ValidateVirtualHardDisk( Path=vhd_path) self._vmutils.check_ret_val(ret_val, job_path) def create_dynamic_vhd(self, path, max_internal_size, format): if format != constants.DISK_FORMAT_VHD: raise vmutils.HyperVException(_("Unsupported disk format: %s") % format) image_man_svc = self._conn.Msvm_ImageManagementService()[0] (job_path, ret_val) = image_man_svc.CreateDynamicVirtualHardDisk( Path=path, MaxInternalSize=max_internal_size) self._vmutils.check_ret_val(ret_val, job_path) def create_differencing_vhd(self, path, parent_path): image_man_svc = self._conn.Msvm_ImageManagementService()[0] (job_path, ret_val) = image_man_svc.CreateDifferencingVirtualHardDisk( Path=path, ParentPath=parent_path) self._vmutils.check_ret_val(ret_val, job_path) def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path): image_man_svc = self._conn.Msvm_ImageManagementService()[0] (job_path, ret_val) = image_man_svc.ReconnectParentVirtualHardDisk( ChildPath=child_vhd_path, ParentPath=parent_vhd_path, Force=True) self._vmutils.check_ret_val(ret_val, job_path) def merge_vhd(self, src_vhd_path, dest_vhd_path): image_man_svc = self._conn.Msvm_ImageManagementService()[0] (job_path, ret_val) = image_man_svc.MergeVirtualHardDisk( SourcePath=src_vhd_path, DestinationPath=dest_vhd_path) self._vmutils.check_ret_val(ret_val, job_path) def _get_resize_method(self): image_man_svc = self._conn.Msvm_ImageManagementService()[0] return image_man_svc.ExpandVirtualHardDisk def resize_vhd(self, vhd_path,
new_max_size, is_file_max_size=True): if is_file_max_size: new_internal_max_size = self.get_internal_vhd_size_by_file_size( vhd_path, new_max_size) else: new_internal_max_size = new_max_size resize = self._get_resize_method() (job_path, ret_val) = resize( Path=vhd_path, MaxInternalSize=new_internal_max_size) self._vmutils.check_ret_val(ret_val, job_path) def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size): """Fixed VHD size = Data Block size + 512 bytes Dynamic_VHD_size = Dynamic Disk Header + Copy of hard disk footer + Hard Disk Footer + Data Block + BAT Dynamic Disk header fields Copy of hard disk footer (512 bytes) Dynamic Disk Header (1024 bytes) BAT (Block Allocation table) Data Block 1 Data Block 2 Data Block n Hard Disk Footer (512 bytes) Default block size is 2M BAT entry size is 4byte """ base_vhd_info = self.get_vhd_info(vhd_path) vhd_type = base_vhd_info['Type'] if vhd_type == constants.VHD_TYPE_FIXED: vhd_header_size = VHD_HEADER_SIZE_FIX return new_vhd_file_size - vhd_header_size elif vhd_type == constants.VHD_TYPE_DYNAMIC: bs = self._get_vhd_dynamic_blk_size(vhd_path) bes = VHD_BAT_ENTRY_SIZE ddhs = VHD_DYNAMIC_DISK_HEADER_SIZE hs = VHD_HEADER_SIZE_DYNAMIC fs = VHD_FOOTER_SIZE_DYNAMIC max_internal_size = (new_vhd_file_size - (hs + ddhs + fs)) * bs / (bes + bs) return max_internal_size else: raise vmutils.HyperVException(_("The %(vhd_type)s type VHD " "is not supported") % {"vhd_type": vhd_type}) def _get_vhd_dynamic_blk_size(self, vhd_path): blk_size_offset = VHD_BLK_SIZE_OFFSET try: with open(vhd_path, "rb") as f: f.seek(blk_size_offset) version = f.read(4) except IOError: raise vmutils.HyperVException(_("Unable to obtain block size from" " VHD %(vhd_path)s") % {"vhd_path": vhd_path}) return struct.unpack('>i', version)[0] def get_vhd_parent_path(self, vhd_path): return self.get_vhd_info(vhd_path).get("ParentPath") def get_vhd_info(self, vhd_path): image_man_svc = self._conn.Msvm_ImageManagementService()[0] (vhd_info, job_path, ret_val) = image_man_svc.GetVirtualHardDiskInfo(vhd_path) self._vmutils.check_ret_val(ret_val, job_path) vhd_info_dict = {} et = ElementTree.fromstring(vhd_info) for item in et.findall("PROPERTY"): name = item.attrib["NAME"] value_text = item.find("VALUE").text if name == "ParentPath": vhd_info_dict[name] = value_text elif name in ["FileSize", "MaxInternalSize"]: vhd_info_dict[name] = long(value_text) elif name in ["InSavedState", "InUse"]: vhd_info_dict[name] = bool(value_text) elif name == "Type": vhd_info_dict[name] = int(value_text) return vhd_info_dict def get_vhd_format(self, path): with open(path, 'rb') as f: # Read header if f.read(8) == VHDX_SIGNATURE: return constants.DISK_FORMAT_VHDX # Read footer f.seek(0, 2) file_size = f.tell() if file_size >= 512: f.seek(-512, 2) if f.read(8) == VHD_SIGNATURE: return constants.DISK_FORMAT_VHD raise vmutils.HyperVException(_('Unsupported virtual disk format')) def get_best_supported_vhd_format(self): return constants.DISK_FORMAT_VHD nova-2014.1.5/nova/virt/hyperv/vhdutilsv2.py0000664000567000056700000002163012540642544022007 0ustar jenkinsjenkins00000000000000# Copyright 2013 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utility class for VHD related operations. Based on the "root/virtualization/v2" namespace available starting with Hyper-V Server / Windows Server 2012. """ import struct import sys if sys.platform == 'win32': import wmi from nova.openstack.common.gettextutils import _ from nova.openstack.common import units from nova.virt.hyperv import constants from nova.virt.hyperv import vhdutils from nova.virt.hyperv import vmutils from nova.virt.hyperv import vmutilsv2 from xml.etree import ElementTree VHDX_BAT_ENTRY_SIZE = 8 VHDX_HEADER_OFFSETS = [64 * units.Ki, 128 * units.Ki] VHDX_HEADER_SECTION_SIZE = units.Mi VHDX_LOG_LENGTH_OFFSET = 68 VHDX_METADATA_SIZE_OFFSET = 64 VHDX_REGION_TABLE_OFFSET = 192 * units.Ki VHDX_BS_METADATA_ENTRY_OFFSET = 48 class VHDUtilsV2(vhdutils.VHDUtils): _VHD_TYPE_DYNAMIC = 3 _VHD_TYPE_DIFFERENCING = 4 _vhd_format_map = { constants.DISK_FORMAT_VHD: 2, constants.DISK_FORMAT_VHDX: 3, } def __init__(self): self._vmutils = vmutilsv2.VMUtilsV2() if sys.platform == 'win32': self._conn = wmi.WMI(moniker='//./root/virtualization/v2') def create_dynamic_vhd(self, path, max_internal_size, format): vhd_format = self._vhd_format_map.get(format) if not vhd_format: raise vmutils.HyperVException(_("Unsupported disk format: %s") % format) self._create_vhd(self._VHD_TYPE_DYNAMIC, vhd_format, path, max_internal_size=max_internal_size) def create_differencing_vhd(self, path, parent_path): parent_vhd_info = self.get_vhd_info(parent_path) self._create_vhd(self._VHD_TYPE_DIFFERENCING, parent_vhd_info["Format"], path, parent_path=parent_path) def _create_vhd(self, vhd_type, format, path, max_internal_size=None, parent_path=None): vhd_info = self._conn.Msvm_VirtualHardDiskSettingData.new() vhd_info.Type = vhd_type vhd_info.Format = format vhd_info.Path = path vhd_info.ParentPath = parent_path if max_internal_size: vhd_info.MaxInternalSize = max_internal_size image_man_svc = self._conn.Msvm_ImageManagementService()[0] (job_path, ret_val) = image_man_svc.CreateVirtualHardDisk( VirtualDiskSettingData=vhd_info.GetText_(1)) self._vmutils.check_ret_val(ret_val, job_path) def reconnect_parent_vhd(self, child_vhd_path, parent_vhd_path): image_man_svc = self._conn.Msvm_ImageManagementService()[0] vhd_info_xml = self._get_vhd_info_xml(image_man_svc, child_vhd_path) # Can't use ".//PROPERTY[@NAME='ParentPath']/VALUE" due to # compatibility requirements with Python 2.6 et = ElementTree.fromstring(vhd_info_xml) for item in et.findall("PROPERTY"): name = item.attrib["NAME"] if name == 'ParentPath': item.find("VALUE").text = parent_vhd_path break vhd_info_xml = ElementTree.tostring(et) (job_path, ret_val) = image_man_svc.SetVirtualHardDiskSettingData( VirtualDiskSettingData=vhd_info_xml) self._vmutils.check_ret_val(ret_val, job_path) def _get_resize_method(self): image_man_svc = self._conn.Msvm_ImageManagementService()[0] return image_man_svc.ResizeVirtualHardDisk def get_internal_vhd_size_by_file_size(self, vhd_path, new_vhd_file_size): """VHDX Size = Header (1 MB) + Log + Metadata Region + BAT + Payload Blocks Chunk size = maximum number of bytes described by a SB block = 2 ** 23 * LogicalSectorSize """ 
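# A worked instance of the formula above, assuming the VHDX defaults
# (512-byte logical sectors, 32 MiB payload blocks; the real values are
# read from the image at runtime):
#
#   chunk_ratio = (2 ** 23 * 512) / (32 * 2 ** 20)
#               = 2 ** 32 / 2 ** 25
#               = 128
#
# i.e. one chunk spans 128 payload blocks (4 GiB of virtual disk), and
# the BAT interleaves one sector-bitmap entry per chunk, which is why
# the computation below divides the BAT entry overhead by chunk_ratio.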
vhd_format = self.get_vhd_format(vhd_path) if vhd_format == constants.DISK_FORMAT_VHD: return super(VHDUtilsV2, self).get_internal_vhd_size_by_file_size( vhd_path, new_vhd_file_size) else: vhd_info = self.get_vhd_info(vhd_path) vhd_type = vhd_info['Type'] if vhd_type == self._VHD_TYPE_DIFFERENCING: raise vmutils.HyperVException(_("Differencing VHDX images " "are not supported")) else: try: with open(vhd_path, 'rb') as f: hs = VHDX_HEADER_SECTION_SIZE bes = VHDX_BAT_ENTRY_SIZE lss = vhd_info['LogicalSectorSize'] bs = self._get_vhdx_block_size(f) ls = self._get_vhdx_log_size(f) ms = self._get_vhdx_metadata_size_and_offset(f)[0] chunk_ratio = (1 << 23) * lss / bs size = new_vhd_file_size max_internal_size = (bs * chunk_ratio * (size - hs - ls - ms - bes - bes / chunk_ratio) / (bs * chunk_ratio + bes * chunk_ratio + bes)) return max_internal_size - (max_internal_size % bs) except IOError as ex: raise vmutils.HyperVException(_("Unable to obtain " "internal size from VHDX: " "%(vhd_path)s. Exception: " "%(ex)s") % {"vhd_path": vhd_path, "ex": ex}) def _get_vhdx_current_header_offset(self, vhdx_file): sequence_numbers = [] for offset in VHDX_HEADER_OFFSETS: vhdx_file.seek(offset + 8) sequence_numbers.append(struct.unpack(' 1: time.sleep(sleep_time) sleep_time = min(2 * sleep_time, 15) if callback: callback(kwargs) return self.call_plugin_serialized(plugin, fn, *args, **kwargs) except self.XenAPI.Failure as exc: if self._is_retryable_exception(exc): LOG.warn(_('%(plugin)s.%(fn)s failed. Retrying call.') % {'plugin': plugin, 'fn': fn}) else: raise raise exception.PluginRetriesExceeded(num_retries=num_retries) def _is_retryable_exception(self, exc): _type, method, error = exc.details[:3] if error == 'RetryableError': LOG.debug(_("RetryableError, so retrying upload_vhd"), exc_info=True) return True elif "signal" in method: LOG.debug(_("Error due to a signal, retrying upload_vhd"), exc_info=True) return True else: return False def _create_session(self, url): """Stubout point. This can be replaced with a mock session.""" self.is_local_connection = url == "unix://local" if self.is_local_connection: return self.XenAPI.xapi_local() return self.XenAPI.Session(url) def _unwrap_plugin_exceptions(self, func, *args, **kwargs): """Parse exception details.""" try: return func(*args, **kwargs) except self.XenAPI.Failure as exc: LOG.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): params = None try: # FIXME(comstud): eval is evil. params = eval(exc.details[3]) except Exception: raise exc raise self.XenAPI.Failure(params) else: raise except xmlrpclib.ProtocolError as exc: LOG.debug(_("Got exception: %s"), exc) raise def get_rec(self, record_type, ref): try: return self.call_xenapi('%s.get_record' % record_type, ref) except self.XenAPI.Failure as e: if e.details[0] != 'HANDLE_INVALID': raise return None def get_all_refs_and_recs(self, record_type): """Retrieve all refs and recs for a Xen record type. Handles race-conditions where the record may be deleted between the `get_all` call and the `get_record` call. """ return self.call_xenapi('%s.get_all_records' % record_type).items() nova-2014.1.5/nova/virt/xenapi/client/__init__.py0000664000567000056700000000114412540642544022657 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. nova-2014.1.5/nova/virt/xenapi/network_utils.py0000664000567000056700000000351512540642544022557 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helper methods for operations related to the management of network records and their attributes like bridges, PIFs, QoS, as well as their lookup functions. """ from nova.openstack.common.gettextutils import _ def find_network_with_name_label(session, name_label): networks = session.call_xenapi('network.get_by_name_label', name_label) if len(networks) == 1: return networks[0] elif len(networks) > 1: raise Exception(_('Found non-unique network for name_label %s') % name_label) else: return None def find_network_with_bridge(session, bridge): """Return the network on which the bridge is attached, if found. The bridge is defined in the nova db and can be found either in the 'bridge' or 'name_label' fields of the XenAPI network record. """ expr = ('field "name__label" = "%s" or field "bridge" = "%s"' % (bridge, bridge)) networks = session.call_xenapi('network.get_all_records_where', expr) if len(networks) == 1: return networks.keys()[0] elif len(networks) > 1: raise Exception(_('Found non-unique network for bridge %s') % bridge) else: raise Exception(_('Found no network for bridge %s') % bridge) nova-2014.1.5/nova/virt/xenapi/image/0000775000567000056700000000000012540643452020351 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/xenapi/image/bittorrent.py0000664000567000056700000001335612540642544023130 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pkg_resources from oslo.config import cfg import six.moves.urllib.parse as urlparse from nova.openstack.common.gettextutils import _ import nova.openstack.common.log as logging from nova.virt.xenapi import vm_utils LOG = logging.getLogger(__name__) xenapi_torrent_opts = [ cfg.StrOpt('torrent_base_url', deprecated_name='xenapi_torrent_base_url', deprecated_group='DEFAULT', help='Base URL for torrent files.'), cfg.FloatOpt('torrent_seed_chance', default=1.0, deprecated_name='xenapi_torrent_seed_chance', deprecated_group='DEFAULT', help='Probability that peer will become a seeder.' ' (1.0 = 100%)'), cfg.IntOpt('torrent_seed_duration', default=3600, deprecated_name='xenapi_torrent_seed_duration', deprecated_group='DEFAULT', help='Number of seconds after downloading an image via' ' BitTorrent that it should be seeded for other peers.'), cfg.IntOpt('torrent_max_last_accessed', default=86400, deprecated_name='xenapi_torrent_max_last_accessed', deprecated_group='DEFAULT', help='Cached torrent files not accessed within this number of' ' seconds can be reaped'), cfg.IntOpt('torrent_listen_port_start', default=6881, deprecated_name='xenapi_torrent_listen_port_start', deprecated_group='DEFAULT', help='Beginning of port range to listen on'), cfg.IntOpt('torrent_listen_port_end', default=6891, deprecated_name='xenapi_torrent_listen_port_end', deprecated_group='DEFAULT', help='End of port range to listen on'), cfg.IntOpt('torrent_download_stall_cutoff', default=600, deprecated_name='xenapi_torrent_download_stall_cutoff', deprecated_group='DEFAULT', help='Number of seconds a download can remain at the same' ' progress percentage w/o being considered a stall'), cfg.IntOpt('torrent_max_seeder_processes_per_host', default=1, deprecated_name='xenapi_torrent_max_seeder_processes_per_host', deprecated_group='DEFAULT', help='Maximum number of seeder processes to run concurrently' ' within a given dom0. (-1 = no limit)') ] CONF = cfg.CONF # xenapi_torrent options in the DEFAULT group were deprecated in Icehouse CONF.register_opts(xenapi_torrent_opts, 'xenserver') class BittorrentStore(object): @staticmethod def _lookup_torrent_url_fn(): """Load a "fetcher" func to get the right torrent URL via entrypoints. """ if CONF.xenserver.torrent_base_url: def _default_torrent_url_fn(instance, image_id): return urlparse.urljoin(CONF.xenserver.torrent_base_url, "%s.torrent" % image_id) return _default_torrent_url_fn matches = [ep for ep in pkg_resources.iter_entry_points('nova.virt.xenapi.vm_utils') if ep.name == 'torrent_url'] if not matches: raise RuntimeError(_('Cannot create default bittorrent URL' ' without torrent_base_url set or' ' torrent URL fetcher extension')) elif len(matches) > 1: raise RuntimeError(_("Multiple torrent URL fetcher extensions" " found. 
Failing.")) else: ep = matches[0] LOG.debug(_("Loading torrent URL fetcher from entry points" " %(ep)s"), {'ep': ep}) fn = ep.load() return fn def download_image(self, context, session, instance, image_id): params = {} params['image_id'] = image_id params['uuid_stack'] = vm_utils._make_uuid_stack() params['sr_path'] = vm_utils.get_sr_path(session) params['torrent_seed_duration'] = CONF.xenserver.torrent_seed_duration params['torrent_seed_chance'] = CONF.xenserver.torrent_seed_chance params['torrent_max_last_accessed'] = \ CONF.xenserver.torrent_max_last_accessed params['torrent_listen_port_start'] = \ CONF.xenserver.torrent_listen_port_start params['torrent_listen_port_end'] = \ CONF.xenserver.torrent_listen_port_end params['torrent_download_stall_cutoff'] = \ CONF.xenserver.torrent_download_stall_cutoff params['torrent_max_seeder_processes_per_host'] = \ CONF.xenserver.torrent_max_seeder_processes_per_host lookup_fn = self._lookup_torrent_url_fn() params['torrent_url'] = lookup_fn(instance, image_id) vdis = session.call_plugin_serialized( 'bittorrent', 'download_vhd', **params) return vdis def upload_image(self, context, session, instance, vdi_uuids, image_id): raise NotImplementedError nova-2014.1.5/nova/virt/xenapi/image/__init__.py0000664000567000056700000000000012540642532022446 0ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/xenapi/image/glance.py0000664000567000056700000000562712540642544022167 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo.config import cfg from nova import exception from nova.image import glance from nova import utils from nova.virt.xenapi import vm_utils CONF = cfg.CONF CONF.import_opt('glance_num_retries', 'nova.image.glance') class GlanceStore(object): def _call_glance_plugin(self, session, fn, params): glance_api_servers = glance.get_api_servers() def pick_glance(kwargs): g_host, g_port, g_use_ssl = glance_api_servers.next() kwargs['glance_host'] = g_host kwargs['glance_port'] = g_port kwargs['glance_use_ssl'] = g_use_ssl return session.call_plugin_serialized_with_retry( 'glance', fn, CONF.glance_num_retries, pick_glance, **params) def _make_params(self, context, session, image_id): return {'image_id': image_id, 'sr_path': vm_utils.get_sr_path(session), 'extra_headers': glance.generate_identity_headers(context)} def download_image(self, context, session, instance, image_id): params = self._make_params(context, session, image_id) params['uuid_stack'] = vm_utils._make_uuid_stack() try: vdis = self._call_glance_plugin(session, 'download_vhd', params) except exception.PluginRetriesExceeded: raise exception.CouldNotFetchImage(image_id=image_id) return vdis def upload_image(self, context, session, instance, vdi_uuids, image_id): params = self._make_params(context, session, image_id) params['vdi_uuids'] = vdi_uuids props = params['properties'] = {} props['auto_disk_config'] = instance['auto_disk_config'] props['os_type'] = instance.get('os_type', None) or ( CONF.xenserver.default_os_type) compression_level = vm_utils.get_compression_level() if compression_level: props['xenapi_image_compression_level'] = compression_level auto_disk_config = utils.get_auto_disk_config_from_instance(instance) if utils.is_auto_disk_config_disabled(auto_disk_config): props["auto_disk_config"] = "disabled" try: self._call_glance_plugin(session, 'upload_vhd', params) except exception.PluginRetriesExceeded: raise exception.CouldNotUploadImage(image_id=image_id) nova-2014.1.5/nova/virt/xenapi/image/vdi_through_dev.py0000664000567000056700000000712212540642544024106 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import eventlet from eventlet import greenio import os import tarfile from nova.image import glance from nova import utils from nova.virt.xenapi import vm_utils class VdiThroughDevStore(object): """Deal with virtual disks by attaching them to the OS domU. At the moment it supports upload to Glance, and the upload format is a raw disk inside a tgz. """ def upload_image(self, context, session, instance, vdi_uuids, image_id): command = UploadToGlanceAsRawTgz( context, session, instance, vdi_uuids, image_id) return command.upload_image() def download_image(self, context, session, instance, image_id): # TODO(matelakat) Move through-dev image download functionality to this # method. 
raise NotImplementedError() class UploadToGlanceAsRawTgz(object): def __init__(self, context, session, instance, vdi_uuids, image_id): self.context = context self.image_id = image_id self.session = session self.vdi_uuids = vdi_uuids def _get_virtual_size(self): return self.session.call_xenapi( 'VDI.get_virtual_size', self._get_vdi_ref()) def _get_vdi_ref(self): return self.session.call_xenapi('VDI.get_by_uuid', self.vdi_uuids[0]) def _perform_upload(self, devpath): readfile, writefile = self._create_pipe() size = self._get_virtual_size() producer = TarGzProducer(devpath, writefile, size, 'disk.raw') consumer = glance.UpdateGlanceImage( self.context, self.image_id, producer.get_metadata(), readfile) pool = eventlet.GreenPool() pool.spawn(producer.start) pool.spawn(consumer.start) pool.waitall() def _create_pipe(self): rpipe, wpipe = os.pipe() rfile = greenio.GreenPipe(rpipe, 'rb', 0) wfile = greenio.GreenPipe(wpipe, 'wb', 0) return rfile, wfile def upload_image(self): vdi_ref = self._get_vdi_ref() with vm_utils.vdi_attached_here(self.session, vdi_ref, read_only=True) as dev: devpath = utils.make_dev_path(dev) with utils.temporary_chown(devpath): self._perform_upload(devpath) class TarGzProducer(object): def __init__(self, devpath, writefile, size, fname): self.fpath = devpath self.output = writefile self.size = size self.fname = fname def get_metadata(self): return { 'disk_format': 'raw', 'container_format': 'tgz' } def start(self): with contextlib.closing(self.output): tinfo = tarfile.TarInfo(name=self.fname) tinfo.size = int(self.size) with tarfile.open(fileobj=self.output, mode='w|gz') as tfile: with self._open_file(self.fpath, 'rb') as input_file: tfile.addfile(tinfo, fileobj=input_file) def _open_file(self, *args): return open(*args) nova-2014.1.5/nova/virt/xenapi/image/utils.py0000664000567000056700000000650212540642544022067 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
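# A short sketch of how the adapter defined below is meant to be used;
# image_service here is hypothetical and stands in for a Glance client
# returning an iterable of chunks:
#
#   data = image_service.download(context, image_id)
#   fileobj = IterableToFileAdapter(data)
#   tar = tarfile.open(mode='r|gz', fileobj=fileobj)
#
# This is the same path RawTGZImage takes via _as_file()/_as_tarfile().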
import shutil import tarfile from nova.image import glance class GlanceImage(object): def __init__(self, context, image_href_or_id): self._context = context self._image_service, self._image_id = glance.get_remote_image_service( context, image_href_or_id) self._cached_meta = None @property def meta(self): if self._cached_meta is None: self._cached_meta = self._image_service.show( self._context, self._image_id) return self._cached_meta def download_to(self, fileobj): return self._image_service.download( self._context, self._image_id, fileobj) def is_raw_tgz(self): return ['raw', 'tgz'] == [ self.meta.get(key) for key in ('disk_format', 'container_format')] def data(self): return self._image_service.download(self._context, self._image_id) class RawImage(object): def __init__(self, glance_image): self.glance_image = glance_image def get_size(self): return int(self.glance_image.meta['size']) def stream_to(self, fileobj): return self.glance_image.download_to(fileobj) class IterableToFileAdapter(object): """A degenerate file-like so that an iterable could be read like a file. As Glance client returns an iterable, but tarfile requires a file like, this is the adapter between the two. This allows tarfile to access the glance stream. """ def __init__(self, iterable): self.iterator = iterable.__iter__() self.remaining_data = '' def read(self, size): chunk = self.remaining_data try: while not chunk: chunk = self.iterator.next() except StopIteration: return '' return_value = chunk[0:size] self.remaining_data = chunk[size:] return return_value class RawTGZImage(object): def __init__(self, glance_image): self.glance_image = glance_image self._tar_info = None self._tar_file = None def _as_file(self): return IterableToFileAdapter(self.glance_image.data()) def _as_tarfile(self): return tarfile.open(mode='r|gz', fileobj=self._as_file()) def get_size(self): if self._tar_file is None: self._tar_file = self._as_tarfile() self._tar_info = self._tar_file.next() return self._tar_info.size def stream_to(self, target_file): if self._tar_file is None: self._tar_file = self._as_tarfile() self._tar_info = self._tar_file.next() source_file = self._tar_file.extractfile(self._tar_info) shutil.copyfileobj(source_file, target_file) self._tar_file.close() nova-2014.1.5/nova/virt/xenapi/vm_utils.py0000664000567000056700000031162412540642544021513 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2011 Piston Cloud Computing, Inc. # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. 
""" import contextlib import os import time import urllib import uuid from xml.parsers import expat from eventlet import greenthread from oslo.config import cfg import six.moves.urllib.parse as urlparse from nova.api.metadata import base as instance_metadata from nova import block_device from nova.compute import flavors from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova import exception from nova.network import model as network_model from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common import strutils from nova.openstack.common import timeutils from nova.openstack.common import units from nova.openstack.common import versionutils from nova.openstack.common import xmlutils from nova import utils from nova.virt import configdrive from nova.virt import cpu from nova.virt.disk import api as disk from nova.virt.disk.vfs import localfs as vfsimpl from nova.virt.xenapi import agent from nova.virt.xenapi.image import utils as image_utils from nova.virt.xenapi import volume_utils LOG = logging.getLogger(__name__) xenapi_vm_utils_opts = [ cfg.StrOpt('cache_images', default='all', deprecated_name='cache_images', deprecated_group='DEFAULT', help='Cache glance images locally. `all` will cache all' ' images, `some` will only cache images that have the' ' image_property `cache_in_nova=True`, and `none` turns' ' off caching entirely'), cfg.IntOpt('image_compression_level', deprecated_name='xenapi_image_compression_level', deprecated_group='DEFAULT', help='Compression level for images, e.g., 9 for gzip -9.' ' Range is 1-9, 9 being most compressed but most CPU' ' intensive on dom0.'), cfg.StrOpt('default_os_type', default='linux', deprecated_name='default_os_type', deprecated_group='DEFAULT', help='Default OS type'), cfg.IntOpt('block_device_creation_timeout', default=10, deprecated_name='block_device_creation_timeout', deprecated_group='DEFAULT', help='Time to wait for a block device to be created'), cfg.IntOpt('max_kernel_ramdisk_size', default=16 * units.Mi, deprecated_name='max_kernel_ramdisk_size', deprecated_group='DEFAULT', help='Maximum size in bytes of kernel or ramdisk images'), cfg.StrOpt('sr_matching_filter', default='default-sr:true', deprecated_name='sr_matching_filter', deprecated_group='DEFAULT', help='Filter for finding the SR to be used to install guest ' 'instances on. To use the Local Storage in default ' 'XenServer/XCP installations set this flag to ' 'other-config:i18n-key=local-storage. To select an SR ' 'with a different matching criteria, you could set it to ' 'other-config:my_favorite_sr=true. On the other hand, to ' 'fall back on the Default SR, as displayed by XenCenter, ' 'set this flag to: default-sr:true'), cfg.BoolOpt('sparse_copy', default=True, deprecated_name='xenapi_sparse_copy', deprecated_group='DEFAULT', help='Whether to use sparse_copy for copying data on a ' 'resize down (False will use standard dd). 
This speeds ' 'up resizes down considerably since large runs of zeros ' 'won\'t have to be rsynced'), cfg.IntOpt('num_vbd_unplug_retries', default=10, deprecated_name='xenapi_num_vbd_unplug_retries', deprecated_group='DEFAULT', help='Maximum number of retries to unplug VBD'), cfg.StrOpt('torrent_images', default='none', deprecated_name='xenapi_torrent_images', deprecated_group='DEFAULT', help='Whether or not to download images via Bit Torrent ' '(all|some|none).'), cfg.StrOpt('ipxe_network_name', deprecated_name='xenapi_ipxe_network_name', deprecated_group='DEFAULT', help='Name of network to use for booting iPXE ISOs'), cfg.StrOpt('ipxe_boot_menu_url', deprecated_name='xenapi_ipxe_boot_menu_url', deprecated_group='DEFAULT', help='URL to the iPXE boot menu'), cfg.StrOpt('ipxe_mkisofs_cmd', default='mkisofs', deprecated_name='xenapi_ipxe_mkisofs_cmd', deprecated_group='DEFAULT', help='Name and optionally path of the tool used for ' 'ISO image creation'), ] CONF = cfg.CONF # xenapi_vm_utils options in the DEFAULT group were deprecated in Icehouse CONF.register_opts(xenapi_vm_utils_opts, 'xenserver') CONF.import_opt('default_ephemeral_format', 'nova.virt.driver') CONF.import_opt('use_cow_images', 'nova.virt.driver') CONF.import_opt('glance_num_retries', 'nova.image.glance') CONF.import_opt('use_ipv6', 'nova.netconf') XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, 'Paused': power_state.PAUSED, 'Suspended': power_state.SUSPENDED, 'Crashed': power_state.CRASHED} SECTOR_SIZE = 512 MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE KERNEL_DIR = '/boot/guest' MAX_VDI_CHAIN_SIZE = 16 PROGRESS_INTERVAL_SECONDS = 300 # Fudge factor to allow for the VHD chain to be slightly larger than # the partitioned space. Otherwise, legitimate images near their # maximum allowed size can fail on build with FlavorDiskTooSmall. VHD_SIZE_CHECK_FUDGE_FACTOR_GB = 10 class ImageType(object): """Enumeration class for distinguishing different image types | 0 - kernel image (goes on dom0's filesystem) | 1 - ramdisk image (goes on dom0's filesystem) | 2 - disk image (local SR, partitioned by objectstore plugin) | 3 - raw disk image (local SR, NOT partitioned by plugin) | 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for | linux, HVM assumed for Windows) | 5 - ISO disk image (local SR, NOT partitioned by plugin) | 6 - config drive """ KERNEL = 0 RAMDISK = 1 DISK = 2 DISK_RAW = 3 DISK_VHD = 4 DISK_ISO = 5 DISK_CONFIGDRIVE = 6 _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO, DISK_CONFIGDRIVE) KERNEL_STR = "kernel" RAMDISK_STR = "ramdisk" DISK_STR = "root" DISK_RAW_STR = "os_raw" DISK_VHD_STR = "vhd" DISK_ISO_STR = "iso" DISK_CONFIGDRIVE_STR = "configdrive" _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR, DISK_ISO_STR, DISK_CONFIGDRIVE_STR) @classmethod def to_string(cls, image_type): return dict(zip(cls._ids, ImageType._strs)).get(image_type) @classmethod def get_role(cls, image_type_id): """Get the role played by the image, based on its type.""" return { cls.KERNEL: 'kernel', cls.RAMDISK: 'ramdisk', cls.DISK: 'root', cls.DISK_RAW: 'root', cls.DISK_VHD: 'root', cls.DISK_ISO: 'iso', cls.DISK_CONFIGDRIVE: 'configdrive' }.get(image_type_id) def get_vm_device_id(session, image_properties): # NOTE: device_id should be 2 for windows VMs which run new xentools # (>=6.1). Refer to http://support.citrix.com/article/CTX135099 for more # information. 
if image_properties is None: image_properties = {} device_id = image_properties.get('xenapi_device_id') # The device_id is required to be set for hypervisor version 6.1 and above if device_id: hypervisor_version = session.product_version if _hypervisor_supports_device_id(hypervisor_version): return device_id else: msg = _("Device id %(id)s specified is not supported by " "hypervisor version %(version)s") % {'id': device_id, 'version': hypervisor_version} raise exception.NovaException(msg) def _hypervisor_supports_device_id(version): version_as_string = '.'.join(str(v) for v in version) return(versionutils.is_compatible('6.1', version_as_string)) def create_vm(session, instance, name_label, kernel, ramdisk, use_pv_kernel=False, device_id=None): """Create a VM record. Returns new VM reference. the use_pv_kernel flag indicates whether the guest is HVM or PV There are 3 scenarios: 1. Using paravirtualization, kernel passed in 2. Using paravirtualization, kernel within the image 3. Using hardware virtualization """ flavor = flavors.extract_flavor(instance) mem = str(long(flavor['memory_mb']) * units.Mi) vcpus = str(flavor['vcpus']) vcpu_weight = flavor['vcpu_weight'] vcpu_params = {} if vcpu_weight is not None: # NOTE(johngarbutt) bug in XenServer 6.1 and 6.2 means # we need to specify both weight and cap for either to apply vcpu_params = {"weight": str(vcpu_weight), "cap": "0"} cpu_mask_list = cpu.get_cpuset_ids() if cpu_mask_list: cpu_mask = ",".join(str(cpu_id) for cpu_id in cpu_mask_list) vcpu_params["mask"] = cpu_mask viridian = 'true' if instance['os_type'] == 'windows' else 'false' rec = { 'actions_after_crash': 'destroy', 'actions_after_reboot': 'restart', 'actions_after_shutdown': 'destroy', 'affinity': '', 'blocked_operations': {}, 'ha_always_run': False, 'ha_restart_priority': '', 'HVM_boot_params': {}, 'HVM_boot_policy': '', 'is_a_template': False, 'memory_dynamic_min': mem, 'memory_dynamic_max': mem, 'memory_static_min': '0', 'memory_static_max': mem, 'memory_target': mem, 'name_description': '', 'name_label': name_label, 'other_config': {'nova_uuid': str(instance['uuid'])}, 'PCI_bus': '', 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': viridian, 'timeoffset': '0'}, 'PV_args': '', 'PV_bootloader': '', 'PV_bootloader_args': '', 'PV_kernel': '', 'PV_legacy_args': '', 'PV_ramdisk': '', 'recommendations': '', 'tags': [], 'user_version': '0', 'VCPUs_at_startup': vcpus, 'VCPUs_max': vcpus, 'VCPUs_params': vcpu_params, 'xenstore_data': {'vm-data/allowvssprovider': 'false'}} # Complete VM configuration record according to the image type # non-raw/raw with PV kernel/raw in HVM mode if use_pv_kernel: rec['platform']['nx'] = 'false' if instance['kernel_id']: # 1. Kernel explicitly passed in, use that rec['PV_args'] = 'root=/dev/xvda1' rec['PV_kernel'] = kernel rec['PV_ramdisk'] = ramdisk else: # 2. Use kernel within the image rec['PV_bootloader'] = 'pygrub' else: # 3. 
Using hardware virtualization rec['platform']['nx'] = 'true' rec['HVM_boot_params'] = {'order': 'dc'} rec['HVM_boot_policy'] = 'BIOS order' if device_id: rec['platform']['device_id'] = device_id vm_ref = session.VM.create(rec) LOG.debug(_('Created VM'), instance=instance) return vm_ref def destroy_vm(session, instance, vm_ref): """Destroys a VM record.""" try: session.VM.destroy(vm_ref) except session.XenAPI.Failure as exc: LOG.exception(exc) return LOG.debug(_("VM destroyed"), instance=instance) def clean_shutdown_vm(session, instance, vm_ref): if is_vm_shutdown(session, vm_ref): LOG.warn(_("VM already halted, skipping shutdown..."), instance=instance) return True LOG.debug(_("Shutting down VM (cleanly)"), instance=instance) try: session.call_xenapi('VM.clean_shutdown', vm_ref) except session.XenAPI.Failure as exc: LOG.exception(exc) return False return True def hard_shutdown_vm(session, instance, vm_ref): if is_vm_shutdown(session, vm_ref): LOG.warn(_("VM already halted, skipping shutdown..."), instance=instance) return True LOG.debug(_("Shutting down VM (hard)"), instance=instance) try: session.call_xenapi('VM.hard_shutdown', vm_ref) except session.XenAPI.Failure as exc: LOG.exception(exc) return False return True def is_vm_shutdown(session, vm_ref): state = get_power_state(session, vm_ref) if state == power_state.SHUTDOWN: return True return False def is_enough_free_mem(session, instance): flavor = flavors.extract_flavor(instance) mem = long(flavor['memory_mb']) * units.Mi host_free_mem = long(session.call_xenapi("host.compute_free_memory", session.host_ref)) return host_free_mem >= mem def find_vbd_by_number(session, vm_ref, number): """Get the VBD reference from the device number.""" vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref) if vbd_refs: for vbd_ref in vbd_refs: try: user_device = session.call_xenapi("VBD.get_userdevice", vbd_ref) if user_device == str(number): return vbd_ref except session.XenAPI.Failure as exc: LOG.exception(exc) raise volume_utils.StorageError( _('VBD not found in instance %s') % vm_ref) def _should_retry_unplug_vbd(err): # Retry if unplug failed with DEVICE_DETACH_REJECTED # For reasons which we don't understand, # we're seeing the device still in use, even when all processes # using the device should be dead. # Since XenServer 6.2, we also need to retry if we get # INTERNAL_ERROR, as that error goes away when you retry. 
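# NOTE(editor): illustrative sketch, not part of the original module.
# unplug_vbd() below follows this retry skeleton: pause briefly between
# attempts, retry only on the transient error codes named above, and
# give up after a bounded number of rounds. `op` and `should_retry` are
# hypothetical stand-ins for the XenAPI call and the check above.
def _sketch_retry(op, should_retry, max_attempts=10):
    for num_attempt in xrange(1, max_attempts + 1):
        if num_attempt > 1:
            greenthread.sleep(1)  # back off before the next attempt
        try:
            return op()
        except Exception as exc:
            if not should_retry(exc):
                raise
    raise RuntimeError('gave up after %d attempts' % max_attempts)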
return (err == 'DEVICE_DETACH_REJECTED' or err == 'INTERNAL_ERROR') def unplug_vbd(session, vbd_ref, this_vm_ref): max_attempts = CONF.xenserver.num_vbd_unplug_retries + 1 for num_attempt in xrange(1, max_attempts + 1): try: if num_attempt > 1: greenthread.sleep(1) session.VBD.unplug(vbd_ref, this_vm_ref) return except session.XenAPI.Failure as exc: err = len(exc.details) > 0 and exc.details[0] if err == 'DEVICE_ALREADY_DETACHED': LOG.info(_('VBD %s already detached'), vbd_ref) return elif _should_retry_unplug_vbd(err): LOG.info(_('VBD %(vbd_ref)s unplug failed with "%(err)s", ' 'attempt %(num_attempt)d/%(max_attempts)d'), {'vbd_ref': vbd_ref, 'num_attempt': num_attempt, 'max_attempts': max_attempts, 'err': err}) else: LOG.exception(exc) raise volume_utils.StorageError( _('Unable to unplug VBD %s') % vbd_ref) raise volume_utils.StorageError( _('Reached maximum number of retries trying to unplug VBD %s') % vbd_ref) def destroy_vbd(session, vbd_ref): """Destroy VBD from host database.""" try: session.call_xenapi('VBD.destroy', vbd_ref) except session.XenAPI.Failure as exc: LOG.exception(exc) raise volume_utils.StorageError( _('Unable to destroy VBD %s') % vbd_ref) def create_vbd(session, vm_ref, vdi_ref, userdevice, vbd_type='disk', read_only=False, bootable=False, osvol=False, empty=False, unpluggable=True): """Create a VBD record and return its reference.""" vbd_rec = {} vbd_rec['VM'] = vm_ref if vdi_ref is None: vdi_ref = 'OpaqueRef:NULL' vbd_rec['VDI'] = vdi_ref vbd_rec['userdevice'] = str(userdevice) vbd_rec['bootable'] = bootable vbd_rec['mode'] = 'RO' if read_only else 'RW' vbd_rec['type'] = vbd_type vbd_rec['unpluggable'] = unpluggable vbd_rec['empty'] = empty vbd_rec['other_config'] = {} vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] LOG.debug(_('Creating %(vbd_type)s-type VBD for VM %(vm_ref)s,' ' VDI %(vdi_ref)s ... 
'), {'vbd_type': vbd_type, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref}) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,' ' VDI %(vdi_ref)s.'), {'vbd_ref': vbd_ref, 'vm_ref': vm_ref, 'vdi_ref': vdi_ref}) if osvol: # set osvol=True in other-config to indicate this is an # attached nova (or cinder) volume session.call_xenapi('VBD.add_to_other_config', vbd_ref, 'osvol', 'True') return vbd_ref def attach_cd(session, vm_ref, vdi_ref, userdevice): """Create an empty VBD, then insert the CD.""" vbd_ref = create_vbd(session, vm_ref, None, userdevice, vbd_type='cd', read_only=True, bootable=True, empty=True, unpluggable=False) session.call_xenapi('VBD.insert', vbd_ref, vdi_ref) return vbd_ref def destroy_vdi(session, vdi_ref): try: session.call_xenapi('VDI.destroy', vdi_ref) except session.XenAPI.Failure: msg = _("Unable to destroy VDI %s") % vdi_ref LOG.debug(msg, exc_info=True) LOG.error(msg) raise volume_utils.StorageError(msg) def safe_destroy_vdis(session, vdi_refs): """Tries to destroy the requested VDIs, but ignores any errors.""" for vdi_ref in vdi_refs: try: destroy_vdi(session, vdi_ref) except volume_utils.StorageError: msg = _("Ignoring error while destroying VDI: %s") % vdi_ref LOG.debug(msg) def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size, read_only=False): """Create a VDI record and returns its reference.""" vdi_ref = session.call_xenapi("VDI.create", {'name_label': name_label, 'name_description': disk_type, 'SR': sr_ref, 'virtual_size': str(virtual_size), 'type': 'User', 'sharable': False, 'read_only': read_only, 'xenstore_data': {}, 'other_config': _get_vdi_other_config(disk_type, instance=instance), 'sm_config': {}, 'tags': []}) LOG.debug(_('Created VDI %(vdi_ref)s (%(name_label)s,' ' %(virtual_size)s, %(read_only)s) on %(sr_ref)s.'), {'vdi_ref': vdi_ref, 'name_label': name_label, 'virtual_size': virtual_size, 'read_only': read_only, 'sr_ref': sr_ref}) return vdi_ref def get_vdi_uuid_for_volume(session, connection_data): sr_uuid, label, sr_params = volume_utils.parse_sr_info(connection_data) sr_ref = volume_utils.find_sr_by_uuid(session, sr_uuid) if not sr_ref: sr_ref = volume_utils.introduce_sr(session, sr_uuid, label, sr_params) if sr_ref is None: raise exception.NovaException(_('SR not present and could not be ' 'introduced')) vdi_uuid = None if 'vdi_uuid' in connection_data: _scan_sr(session, sr_ref) vdi_uuid = connection_data['vdi_uuid'] else: try: vdi_ref = volume_utils.introduce_vdi(session, sr_ref) vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) except volume_utils.StorageError as exc: LOG.exception(exc) volume_utils.forget_sr(session, sr_ref) return vdi_uuid def get_vdis_for_instance(context, session, instance, name_label, image, image_type, block_device_info=None): vdis = {} if block_device_info: msg = "block device info: %s" % block_device_info # NOTE(mriedem): block_device_info can contain an auth_password # so we have to scrub the message before logging it. 
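# NOTE(editor): illustrative sketch with a hypothetical message, not
# part of the original module. mask_password() rewrites credential
# values before the text reaches the logs; with the default secret it
# should turn "{'password': 's3kr3t'}" into "{'password': '***'}".
def _sketch_scrub(msg="connection: {'password': 's3kr3t'}"):
    return logging.mask_password(msg)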
LOG.debug(logging.mask_password(msg), instance=instance) root_device_name = block_device_info['root_device_name'] for bdm in block_device_info['block_device_mapping']: if (block_device.strip_prefix(bdm['mount_device']) == block_device.strip_prefix(root_device_name)): # If we're a root-device, record that fact so we don't download # a root image via Glance type_ = 'root' else: # Otherwise, use mount_device as `type_` so that we have easy # access to it in _attach_disks to create the VBD type_ = bdm['mount_device'] connection_data = bdm['connection_info']['data'] vdi_uuid = get_vdi_uuid_for_volume(session, connection_data) if vdi_uuid: vdis[type_] = dict(uuid=vdi_uuid, file=None, osvol=True) # If we didn't get a root VDI from volumes, then use the Glance image as # the root device if 'root' not in vdis: create_image_vdis = _create_image( context, session, instance, name_label, image, image_type) vdis.update(create_image_vdis) # Just get the VDI ref once for vdi in vdis.itervalues(): vdi['ref'] = session.call_xenapi('VDI.get_by_uuid', vdi['uuid']) return vdis @contextlib.contextmanager def _dummy_vm(session, instance, vdi_ref): """This creates a temporary VM so that we can snapshot a VDI. VDI's can't be snapshotted directly since the API expects a `vm_ref`. To work around this, we need to create a temporary VM and then map the VDI to the VM using a temporary VBD. """ name_label = "dummy" vm_ref = create_vm(session, instance, name_label, None, None) try: vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect', read_only=True) try: yield vm_ref finally: try: destroy_vbd(session, vbd_ref) except volume_utils.StorageError: # destroy_vbd() will log error pass finally: destroy_vm(session, instance, vm_ref) def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref): """Copy a VDI and return the new VDIs reference. This function differs from the XenAPI `VDI.copy` call in that the copy is atomic and isolated, meaning we don't see half-downloaded images. It accomplishes this by copying the VDI's into a temporary directory and then atomically renaming them into the SR when the copy is completed. The correct long term solution is to fix `VDI.copy` so that it is atomic and isolated. """ with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref: label = "snapshot" with snapshot_attached_here( session, instance, vm_ref, label) as vdi_uuids: imported_vhds = session.call_plugin_serialized( 'workarounds', 'safe_copy_vdis', sr_path=get_sr_path(session, sr_ref=sr_ref), vdi_uuids=vdi_uuids, uuid_stack=_make_uuid_stack()) root_uuid = imported_vhds['root']['uuid'] # rescan to discover new VHDs scan_default_sr(session) vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid) return vdi_ref def _clone_vdi(session, vdi_to_clone_ref): """Clones a VDI and return the new VDIs reference.""" vdi_ref = session.call_xenapi('VDI.clone', vdi_to_clone_ref) LOG.debug(_('Cloned VDI %(vdi_ref)s from VDI ' '%(vdi_to_clone_ref)s'), {'vdi_ref': vdi_ref, 'vdi_to_clone_ref': vdi_to_clone_ref}) return vdi_ref def _get_vdi_other_config(disk_type, instance=None): """Return metadata to store in VDI's other_config attribute. `nova_instance_uuid` is used to associate a VDI with a particular instance so that, if it becomes orphaned from an unclean shutdown of a compute-worker, we can safely detach it. 
""" other_config = {'nova_disk_type': disk_type} # create_vdi may be called simply while creating a volume # hence information about instance may or may not be present if instance: other_config['nova_instance_uuid'] = instance['uuid'] return other_config def _set_vdi_info(session, vdi_ref, vdi_type, name_label, description, instance): existing_other_config = session.call_xenapi('VDI.get_other_config', vdi_ref) session.call_xenapi('VDI.set_name_label', vdi_ref, name_label) session.call_xenapi('VDI.set_name_description', vdi_ref, description) other_config = _get_vdi_other_config(vdi_type, instance=instance) for key, value in other_config.iteritems(): if key not in existing_other_config: session.call_xenapi( "VDI.add_to_other_config", vdi_ref, key, value) def _vm_get_vbd_refs(session, vm_ref): return session.call_xenapi("VM.get_VBDs", vm_ref) def _vbd_get_rec(session, vbd_ref): return session.call_xenapi("VBD.get_record", vbd_ref) def _vdi_get_rec(session, vdi_ref): return session.call_xenapi("VDI.get_record", vdi_ref) def _vdi_get_uuid(session, vdi_ref): return session.call_xenapi("VDI.get_uuid", vdi_ref) def _vdi_snapshot(session, vdi_ref): return session.call_xenapi("VDI.snapshot", vdi_ref, {}) def get_vdi_for_vm_safely(session, vm_ref, userdevice='0'): """Retrieves the primary VDI for a VM.""" vbd_refs = _vm_get_vbd_refs(session, vm_ref) for vbd_ref in vbd_refs: vbd_rec = _vbd_get_rec(session, vbd_ref) # Convention dictates the primary VDI will be userdevice 0 if vbd_rec['userdevice'] == userdevice: vdi_ref = vbd_rec['VDI'] vdi_rec = _vdi_get_rec(session, vdi_ref) return vdi_ref, vdi_rec raise exception.NovaException(_("No primary VDI found for %s") % vm_ref) def get_all_vdi_uuids_for_vm(session, vm_ref, min_userdevice=0): vbd_refs = _vm_get_vbd_refs(session, vm_ref) for vbd_ref in vbd_refs: vbd_rec = _vbd_get_rec(session, vbd_ref) if int(vbd_rec['userdevice']) >= min_userdevice: vdi_ref = vbd_rec['VDI'] yield _vdi_get_uuid(session, vdi_ref) def _try_strip_base_mirror_from_vdi(session, vdi_ref): try: session.call_xenapi("VDI.remove_from_sm_config", vdi_ref, "base_mirror") except session.XenAPI.Failure: LOG.debug(_("Error while removing sm_config"), exc_info=True) def strip_base_mirror_from_vdis(session, vm_ref): # NOTE(johngarbutt) part of workaround for XenServer bug CA-98606 vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref) for vbd_ref in vbd_refs: vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref) _try_strip_base_mirror_from_vdi(session, vdi_ref) @contextlib.contextmanager def snapshot_attached_here(session, instance, vm_ref, label, userdevice='0', post_snapshot_callback=None): # impl method allow easier patching for tests return _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice, post_snapshot_callback) def _snapshot_attached_here_impl(session, instance, vm_ref, label, userdevice, post_snapshot_callback): """Snapshot the root disk only. Return a list of uuids for the vhds in the chain. 
""" LOG.debug(_("Starting snapshot for VM"), instance=instance) # Memorize the original_parent_uuid so we can poll for coalesce vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref, userdevice) original_parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref) sr_ref = vm_vdi_rec["SR"] snapshot_ref = _vdi_snapshot(session, vm_vdi_ref) if post_snapshot_callback is not None: post_snapshot_callback(task_state=task_states.IMAGE_PENDING_UPLOAD) try: # Ensure no VHDs will vanish while we migrate them _wait_for_vhd_coalesce(session, instance, sr_ref, vm_vdi_ref, original_parent_uuid) snapshot_uuid = _vdi_get_uuid(session, snapshot_ref) chain = _walk_vdi_chain(session, snapshot_uuid) vdi_uuids = [vdi_rec['uuid'] for vdi_rec in chain] yield vdi_uuids finally: safe_destroy_vdis(session, [snapshot_ref]) # TODO(johngarbut) we need to check the snapshot has been coalesced # now its associated VDI has been deleted. def get_sr_path(session, sr_ref=None): """Return the path to our storage repository This is used when we're dealing with VHDs directly, either by taking snapshots or by restoring an image in the DISK_VHD format. """ if sr_ref is None: sr_ref = safe_find_sr(session) pbd_rec = session.call_xenapi("PBD.get_all_records_where", 'field "host"="%s" and ' 'field "SR"="%s"' % (session.host_ref, sr_ref)) # NOTE(bobball): There can only be one PBD for a host/SR pair, but path is # not always present - older versions of XS do not set it. pbd_ref = pbd_rec.keys()[0] device_config = pbd_rec[pbd_ref]['device_config'] if 'path' in device_config: return device_config['path'] sr_rec = session.call_xenapi("SR.get_record", sr_ref) sr_uuid = sr_rec["uuid"] if sr_rec["type"] not in ["ext", "nfs"]: raise exception.NovaException( _("Only file-based SRs (ext/NFS) are supported by this feature." " SR %(uuid)s is of type %(type)s") % {"uuid": sr_uuid, "type": sr_rec["type"]}) return os.path.join(CONF.xenserver.sr_base_path, sr_uuid) def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False): """Destroy used or unused cached images. A cached image that is being used by at least one VM is said to be 'used'. In the case of an 'unused' image, the cached image will be the only descendent of the base-copy. So when we delete the cached-image, the refcount will drop to zero and XenServer will automatically destroy the base-copy for us. The default behavior of this function is to destroy only 'unused' cached images. To destroy all cached images, use the `all_cached=True` kwarg. """ cached_images = _find_cached_images(session, sr_ref) destroyed = set() def destroy_cached_vdi(vdi_uuid, vdi_ref): LOG.debug(_("Destroying cached VDI '%(vdi_uuid)s'")) if not dry_run: destroy_vdi(session, vdi_ref) destroyed.add(vdi_uuid) for vdi_ref in cached_images.values(): vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref) if all_cached: destroy_cached_vdi(vdi_uuid, vdi_ref) continue # Unused-Only: Search for siblings # Chain length greater than two implies a VM must be holding a ref to # the base-copy (otherwise it would have coalesced), so consider this # cached image used. 
chain = list(_walk_vdi_chain(session, vdi_uuid)) if len(chain) > 2: continue elif len(chain) == 2: # Siblings imply cached image is used root_vdi_rec = chain[-1] children = _child_vhds(session, sr_ref, root_vdi_rec['uuid']) if len(children) > 1: continue destroy_cached_vdi(vdi_uuid, vdi_ref) return destroyed def _find_cached_images(session, sr_ref): """Return a dict(uuid=vdi_ref) representing all cached images.""" cached_images = {} for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref): try: image_id = vdi_rec['other_config']['image-id'] except KeyError: continue cached_images[image_id] = vdi_ref return cached_images def _find_cached_image(session, image_id, sr_ref): """Returns the vdi-ref of the cached image.""" name_label = _get_image_vdi_label(image_id) recs = session.call_xenapi("VDI.get_all_records_where", 'field "name__label"="%s"' % name_label) number_found = len(recs) if number_found > 0: if number_found > 1: LOG.warn(_("Multiple base images for image: %s") % image_id) return recs.keys()[0] def _get_resize_func_name(session): brand = session.product_brand version = session.product_version # To maintain backwards compatibility. All recent versions # should use VDI.resize if version and brand: xcp = brand == 'XCP' r1_2_or_above = (version[0] == 1 and version[1] > 1) or version[0] > 1 xenserver = brand == 'XenServer' r6_or_above = version[0] > 5 if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above): return 'VDI.resize_online' return 'VDI.resize' def _vdi_get_virtual_size(session, vdi_ref): size = session.call_xenapi('VDI.get_virtual_size', vdi_ref) return int(size) def _vdi_resize(session, vdi_ref, new_size): resize_func_name = _get_resize_func_name(session) session.call_xenapi(resize_func_name, vdi_ref, str(new_size)) def update_vdi_virtual_size(session, instance, vdi_ref, new_gb): virtual_size = _vdi_get_virtual_size(session, vdi_ref) new_disk_size = new_gb * units.Gi msg = _("Resizing up VDI %(vdi_ref)s from %(virtual_size)d " "to %(new_disk_size)d") LOG.debug(msg, {'vdi_ref': vdi_ref, 'virtual_size': virtual_size, 'new_disk_size': new_disk_size}, instance=instance) if virtual_size < new_disk_size: # For resize up. 
Simple VDI resize will do the trick _vdi_resize(session, vdi_ref, new_disk_size) elif virtual_size == new_disk_size: LOG.debug(_("No need to change vdi virtual size."), instance=instance) else: # NOTE(johngarbutt): we should never get here # but if we don't raise an exception, a user might be able to use # more storage than allowed by their chosen instance flavor LOG.error(_("VDI %s is bigger than requested resize up size."), vdi_ref, instance=instance) raise exception.ResizeError(_("VDI too big for requested resize up.")) def resize_disk(session, instance, vdi_ref, flavor): size_gb = flavor['root_gb'] if size_gb == 0: reason = _("Can't resize a disk to 0 GB.") raise exception.ResizeError(reason=reason) sr_ref = safe_find_sr(session) clone_ref = _clone_vdi(session, vdi_ref) try: # Resize partition and filesystem down _auto_configure_disk(session, clone_ref, size_gb) # Create new VDI vdi_size = size_gb * units.Gi # NOTE(johannes): No resizing allowed for rescue instances, so # using instance['name'] is safe here new_ref = create_vdi(session, sr_ref, instance, instance['name'], 'root', vdi_size) new_uuid = session.call_xenapi('VDI.get_uuid', new_ref) # Manually copy contents over virtual_size = size_gb * units.Gi _copy_partition(session, clone_ref, new_ref, 1, virtual_size) return new_ref, new_uuid finally: destroy_vdi(session, clone_ref) def _auto_configure_disk(session, vdi_ref, new_gb): """Partition and resize FS to match the size specified by flavors.root_gb. This is a fail-safe to prevent accidentally destroying data on a disk erroneously marked as auto_disk_config=True. The criteria for allowing resize are: 1. 'auto_disk_config' must be true for the instance (and image). (If we've made it here, then auto_disk_config=True.) 2. The disk must have only one partition. 3. The file-system on the one partition must be ext3 or ext4. """ if new_gb == 0: LOG.debug(_("Skipping auto_config_disk as destination size is 0GB")) return with vdi_attached_here(session, vdi_ref, read_only=False) as dev: partitions = _get_partitions(dev) if len(partitions) != 1: reason = _('Disk must have only one partition.') raise exception.CannotResizeDisk(reason=reason) _num, start, old_sectors, ptype = partitions[0] if ptype in ('ext3', 'ext4'): new_sectors = new_gb * units.Gi / SECTOR_SIZE _resize_part_and_fs(dev, start, old_sectors, new_sectors) else: reason = _('Disk contains a filesystem ' 'we are unable to resize: %s') raise exception.CannotResizeDisk(reason=(reason % ptype)) def try_auto_configure_disk(session, vdi_ref, new_gb): try: _auto_configure_disk(session, vdi_ref, new_gb) except exception.CannotResizeDisk as e: msg = _('Attempted auto_configure_disk failed because: %s') LOG.warn(msg % e) def _make_partition(session, dev, partition_start, partition_end): dev_path = utils.make_dev_path(dev) # NOTE(bobball) If this runs in Dom0, parted will error trying # to re-read the partition table and return a generic error utils.execute('parted', '--script', dev_path, 'mklabel', 'msdos', run_as_root=True, check_exit_code=not session.is_local_connection) utils.execute('parted', '--script', dev_path, '--', 'mkpart', 'primary', partition_start, partition_end, run_as_root=True, check_exit_code=not session.is_local_connection) partition_path = utils.make_dev_path(dev, partition=1) if session.is_local_connection: # Need to refresh the partitions utils.trycmd('kpartx', '-a', dev_path, run_as_root=True, discard_warnings=True) # Sometimes the partition gets created under /dev/mapper, depending # on the setup in dom0. 
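# NOTE(editor): illustrative sketch with hypothetical device names, not
# part of the original module. These are the two candidate locations the
# code below chooses between once kpartx has refreshed the partitions:
def _sketch_partition_paths(dev='xvdb'):
    partition_path = utils.make_dev_path(dev, partition=1)  # '/dev/xvdb1'
    mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path)
    return partition_path, mapper_path  # second is '/dev/mapper/xvdb1'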
mapper_path = '/dev/mapper/%s' % os.path.basename(partition_path) if os.path.exists(mapper_path): return mapper_path return partition_path def _generate_disk(session, instance, vm_ref, userdevice, name_label, disk_type, size_mb, fs_type): """Steps to programmatically generate a disk: 1. Create VDI of desired size 2. Attach VDI to compute worker 3. Create partition 4. Create VBD between instance VM and VDI """ # 1. Create VDI sr_ref = safe_find_sr(session) ONE_MEG = units.Mi virtual_size = size_mb * ONE_MEG vdi_ref = create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size) try: # 2. Attach VDI to compute worker (VBD hotplug) with vdi_attached_here(session, vdi_ref, read_only=False) as dev: # 3. Create partition partition_start = "0" partition_end = "-0" partition_path = _make_partition(session, dev, partition_start, partition_end) if fs_type == 'linux-swap': utils.execute('mkswap', partition_path, run_as_root=True) elif fs_type is not None: utils.execute('mkfs', '-t', fs_type, partition_path, run_as_root=True) # 4. Create VBD between instance VM and VDI if vm_ref: create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False) except Exception: with excutils.save_and_reraise_exception(): msg = _("Error while generating disk number: %s") % userdevice LOG.debug(msg, instance=instance, exc_info=True) safe_destroy_vdis(session, [vdi_ref]) return vdi_ref def generate_swap(session, instance, vm_ref, userdevice, name_label, swap_mb): # NOTE(jk0): We use a FAT32 filesystem for the Windows swap # partition because that is what parted supports. is_windows = instance['os_type'] == "windows" fs_type = "vfat" if is_windows else "linux-swap" _generate_disk(session, instance, vm_ref, userdevice, name_label, 'swap', swap_mb, fs_type) def get_ephemeral_disk_sizes(total_size_gb): if not total_size_gb: return max_size_gb = 2000 if total_size_gb % 1024 == 0: max_size_gb = 1024 left_to_allocate = total_size_gb while left_to_allocate > 0: size_gb = min(max_size_gb, left_to_allocate) yield size_gb left_to_allocate -= size_gb def generate_single_ephemeral(session, instance, vm_ref, userdevice, size_gb, instance_name_label=None): if instance_name_label is None: instance_name_label = instance["name"] name_label = "%s ephemeral" % instance_name_label #TODO(johngarbutt) need to move DEVICE_EPHEMERAL from vmops to use it here label_number = int(userdevice) - 4 if label_number > 0: name_label = "%s (%d)" % (name_label, label_number) return _generate_disk(session, instance, vm_ref, str(userdevice), name_label, 'ephemeral', size_gb * 1024, CONF.default_ephemeral_format) def generate_ephemeral(session, instance, vm_ref, first_userdevice, instance_name_label, total_size_gb): # NOTE(johngarbutt): max possible size of a VHD disk is 2043GB sizes = get_ephemeral_disk_sizes(total_size_gb) first_userdevice = int(first_userdevice) vdi_refs = [] try: for userdevice, size_gb in enumerate(sizes, start=first_userdevice): ref = generate_single_ephemeral(session, instance, vm_ref, userdevice, size_gb, instance_name_label) vdi_refs.append(ref) except Exception as exc: with excutils.save_and_reraise_exception(): LOG.debug(_("Error when generating ephemeral disk. 
" "Device: %(userdevice)s Size GB: %(size_gb)s " "Error: %(exc)s"), { 'userdevice': userdevice, 'size_gb': size_gb, 'exc': exc}) safe_destroy_vdis(session, vdi_refs) def generate_iso_blank_root_disk(session, instance, vm_ref, userdevice, name_label, size_gb): _generate_disk(session, instance, vm_ref, userdevice, name_label, 'user', size_gb * 1024, CONF.default_ephemeral_format) def generate_configdrive(session, instance, vm_ref, userdevice, network_info, admin_password=None, files=None): sr_ref = safe_find_sr(session) vdi_ref = create_vdi(session, sr_ref, instance, 'config-2', 'configdrive', configdrive.CONFIGDRIVESIZE_BYTES) try: with vdi_attached_here(session, vdi_ref, read_only=False) as dev: extra_md = {} if admin_password: extra_md['admin_pass'] = admin_password inst_md = instance_metadata.InstanceMetadata(instance, content=files, extra_md=extra_md, network_info=network_info) with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: with utils.tempdir() as tmp_path: tmp_file = os.path.join(tmp_path, 'configdrive') cdb.make_drive(tmp_file) dev_path = utils.make_dev_path(dev) utils.execute('dd', 'if=%s' % tmp_file, 'of=%s' % dev_path, 'oflag=direct,sync', run_as_root=True) create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False, read_only=True) except Exception: with excutils.save_and_reraise_exception(): msg = _("Error while generating config drive") LOG.debug(msg, instance=instance, exc_info=True) safe_destroy_vdis(session, [vdi_ref]) def _create_kernel_image(context, session, instance, name_label, image_id, image_type): """Creates kernel/ramdisk file from the image stored in the cache. If the image is not present in the cache, it streams it from glance. Returns: A list of dictionaries that describe VDIs """ filename = "" if CONF.xenserver.cache_images: args = {} args['cached-image'] = image_id args['new-image-uuid'] = str(uuid.uuid4()) filename = session.call_plugin('kernel', 'create_kernel_ramdisk', args) if filename == "": return _fetch_disk_image(context, session, instance, name_label, image_id, image_type) else: vdi_type = ImageType.to_string(image_type) return {vdi_type: dict(uuid=None, file=filename)} def create_kernel_and_ramdisk(context, session, instance, name_label): kernel_file = None ramdisk_file = None if instance['kernel_id']: vdis = _create_kernel_image(context, session, instance, name_label, instance['kernel_id'], ImageType.KERNEL) kernel_file = vdis['kernel'].get('file') if instance['ramdisk_id']: vdis = _create_kernel_image(context, session, instance, name_label, instance['ramdisk_id'], ImageType.RAMDISK) ramdisk_file = vdis['ramdisk'].get('file') return kernel_file, ramdisk_file def destroy_kernel_ramdisk(session, instance, kernel, ramdisk): args = {} if kernel: args['kernel-file'] = kernel if ramdisk: args['ramdisk-file'] = ramdisk if args: LOG.debug(_("Removing kernel/ramdisk files from dom0"), instance=instance) session.call_plugin('kernel', 'remove_kernel_ramdisk', args) def _get_image_vdi_label(image_id): return 'Glance Image %s' % image_id def _create_cached_image(context, session, instance, name_label, image_id, image_type): sr_ref = safe_find_sr(session) sr_type = session.call_xenapi('SR.get_type', sr_ref) if CONF.use_cow_images and sr_type != "ext": LOG.warning(_("Fast cloning is only supported on default local SR " "of type ext. SR on this system was found to be of " "type %s. 
Ignoring the cow flag."), sr_type) @utils.synchronized('xenapi-image-cache' + image_id) def _create_cached_image_impl(context, session, instance, name_label, image_id, image_type, sr_ref): cache_vdi_ref = _find_cached_image(session, image_id, sr_ref) if cache_vdi_ref is None: vdis = _fetch_image(context, session, instance, name_label, image_id, image_type) cache_vdi_ref = session.call_xenapi( 'VDI.get_by_uuid', vdis['root']['uuid']) session.call_xenapi('VDI.set_name_label', cache_vdi_ref, _get_image_vdi_label(image_id)) session.call_xenapi('VDI.set_name_description', cache_vdi_ref, 'root') session.call_xenapi('VDI.add_to_other_config', cache_vdi_ref, 'image-id', str(image_id)) if CONF.use_cow_images: new_vdi_ref = _clone_vdi(session, cache_vdi_ref) elif sr_type == 'ext': new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, cache_vdi_ref) else: new_vdi_ref = session.call_xenapi("VDI.copy", cache_vdi_ref, sr_ref) session.call_xenapi('VDI.set_name_label', new_vdi_ref, '') session.call_xenapi('VDI.set_name_description', new_vdi_ref, '') session.call_xenapi('VDI.remove_from_other_config', new_vdi_ref, 'image-id') vdi_uuid = session.call_xenapi('VDI.get_uuid', new_vdi_ref) return vdi_uuid vdi_uuid = _create_cached_image_impl(context, session, instance, name_label, image_id, image_type, sr_ref) vdis = {} vdi_type = ImageType.get_role(image_type) vdis[vdi_type] = dict(uuid=vdi_uuid, file=None) return vdis def _create_image(context, session, instance, name_label, image_id, image_type): """Creates VDI from the image stored in the local cache. If the image is not present in the cache, it streams it from glance. Returns: A list of dictionaries that describe VDIs """ cache_images = CONF.xenserver.cache_images.lower() # Determine if the image is cacheable if image_type == ImageType.DISK_ISO: cache = False elif cache_images == 'all': cache = True elif cache_images == 'some': sys_meta = utils.instance_sys_meta(instance) try: cache = strutils.bool_from_string(sys_meta['image_cache_in_nova']) except KeyError: cache = False elif cache_images == 'none': cache = False else: LOG.warning(_("Unrecognized cache_images value '%s', defaulting to" " True"), CONF.xenserver.cache_images) cache = True # Fetch (and cache) the image if cache: vdis = _create_cached_image(context, session, instance, name_label, image_id, image_type) else: vdis = _fetch_image(context, session, instance, name_label, image_id, image_type) for vdi_type, vdi in vdis.iteritems(): vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi['uuid']) _set_vdi_info(session, vdi_ref, vdi_type, name_label, vdi_type, instance) return vdis def _fetch_image(context, session, instance, name_label, image_id, image_type): """Fetch image from glance based on image type. Returns: A single filename if image_type is KERNEL or RAMDISK A list of dictionaries that describe VDIs, otherwise """ if image_type == ImageType.DISK_VHD: vdis = _fetch_vhd_image(context, session, instance, image_id) else: vdis = _fetch_disk_image(context, session, instance, name_label, image_id, image_type) for vdi_type, vdi in vdis.iteritems(): vdi_uuid = vdi['uuid'] LOG.debug(_("Fetched VDIs of type '%(vdi_type)s' with UUID" " '%(vdi_uuid)s'"), {'vdi_type': vdi_type, 'vdi_uuid': vdi_uuid}, instance=instance) return vdis def _make_uuid_stack(): # NOTE(sirp): The XenAPI plugins run under Python 2.4 # which does not have the `uuid` module. 
To work around this, # we generate the uuids here (under Python 2.6+) and # pass them as arguments return [str(uuid.uuid4()) for i in xrange(MAX_VDI_CHAIN_SIZE)] def _image_uses_bittorrent(context, instance): bittorrent = False torrent_images = CONF.xenserver.torrent_images.lower() if torrent_images == 'all': bittorrent = True elif torrent_images == 'some': sys_meta = utils.instance_sys_meta(instance) try: bittorrent = strutils.bool_from_string( sys_meta['image_bittorrent']) except KeyError: pass elif torrent_images == 'none': pass else: LOG.warning(_("Invalid value '%s' for torrent_images"), torrent_images) return bittorrent def _default_download_handler(): # TODO(sirp): This should be configurable like upload_handler return importutils.import_object( 'nova.virt.xenapi.image.glance.GlanceStore') def _choose_download_handler(context, instance): if _image_uses_bittorrent(context, instance): return importutils.import_object( 'nova.virt.xenapi.image.bittorrent.BittorrentStore') else: return _default_download_handler() def get_compression_level(): level = CONF.xenserver.image_compression_level if level is not None and (level < 1 or level > 9): LOG.warn(_("Invalid value '%d' for image_compression_level"), level) return None return level def _fetch_vhd_image(context, session, instance, image_id): """Tell glance to download an image and put the VHDs into the SR Returns: A list of dictionaries that describe VDIs """ LOG.debug(_("Asking xapi to fetch vhd image %s"), image_id, instance=instance) handler = _choose_download_handler(context, instance) try: vdis = handler.download_image(context, session, instance, image_id) except Exception as e: default_handler = _default_download_handler() # Using type() instead of isinstance() so instance of subclass doesn't # test as equivalent if type(handler) == type(default_handler): raise LOG.exception(_("Download handler '%(handler)s' raised an" " exception, falling back to default handler" " '%(default_handler)s'") % {'handler': handler, 'default_handler': default_handler}) vdis = default_handler.download_image( context, session, instance, image_id) # Ensure we can see the import VHDs as VDIs scan_default_sr(session) vdi_uuid = vdis['root']['uuid'] try: _check_vdi_size(context, session, instance, vdi_uuid) except Exception: with excutils.save_and_reraise_exception(): msg = _("Error while checking vdi size") LOG.debug(msg, instance=instance, exc_info=True) for vdi in vdis.values(): vdi_uuid = vdi['uuid'] vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid) safe_destroy_vdis(session, [vdi_ref]) return vdis def _get_vdi_chain_size(session, vdi_uuid): """Compute the total size of a VDI chain, starting with the specified VDI UUID. This will walk the VDI chain to the root, add the size of each VDI into the total. 
""" size_bytes = 0 for vdi_rec in _walk_vdi_chain(session, vdi_uuid): cur_vdi_uuid = vdi_rec['uuid'] vdi_size_bytes = int(vdi_rec['physical_utilisation']) LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=' '%(vdi_size_bytes)d'), {'cur_vdi_uuid': cur_vdi_uuid, 'vdi_size_bytes': vdi_size_bytes}) size_bytes += vdi_size_bytes return size_bytes def _check_vdi_size(context, session, instance, vdi_uuid): flavor = flavors.extract_flavor(instance) allowed_size = (flavor['root_gb'] + VHD_SIZE_CHECK_FUDGE_FACTOR_GB) * units.Gi if not flavor['root_gb']: # root_gb=0 indicates that we're disabling size checks return size = _get_vdi_chain_size(session, vdi_uuid) if size > allowed_size: LOG.error(_("Image size %(size)d exceeded flavor " "allowed size %(allowed_size)d"), {'size': size, 'allowed_size': allowed_size}, instance=instance) raise exception.FlavorDiskTooSmall() def _fetch_disk_image(context, session, instance, name_label, image_id, image_type): """Fetch the image from Glance NOTE: Unlike _fetch_vhd_image, this method does not use the Glance plugin; instead, it streams the disks through domU to the VDI directly. Returns: A single filename if image_type is KERNEL_RAMDISK A list of dictionaries that describe VDIs, otherwise """ # FIXME(sirp): Since the Glance plugin seems to be required for the # VHD disk, it may be worth using the plugin for both VHD and RAW and # DISK restores image_type_str = ImageType.to_string(image_type) LOG.debug(_("Fetching image %(image_id)s, type %(image_type_str)s"), {'image_id': image_id, 'image_type_str': image_type_str}, instance=instance) if image_type == ImageType.DISK_ISO: sr_ref = _safe_find_iso_sr(session) else: sr_ref = safe_find_sr(session) glance_image = image_utils.GlanceImage(context, image_id) if glance_image.is_raw_tgz(): image = image_utils.RawTGZImage(glance_image) else: image = image_utils.RawImage(glance_image) virtual_size = image.get_size() vdi_size = virtual_size LOG.debug(_("Size for image %(image_id)s: %(virtual_size)d"), {'image_id': image_id, 'virtual_size': virtual_size}, instance=instance) if image_type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and vdi_size > CONF.xenserver.max_kernel_ramdisk_size): max_size = CONF.xenserver.max_kernel_ramdisk_size raise exception.NovaException( _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, " "max %(max_size)d bytes") % {'vdi_size': vdi_size, 'max_size': max_size}) vdi_ref = create_vdi(session, sr_ref, instance, name_label, image_type_str, vdi_size) # From this point we have a VDI on Xen host; # If anything goes wrong, we need to remember its uuid. try: filename = None vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) with vdi_attached_here(session, vdi_ref, read_only=False) as dev: _stream_disk( session, image.stream_to, image_type, virtual_size, dev) if image_type in (ImageType.KERNEL, ImageType.RAMDISK): # We need to invoke a plugin for copying the # content of the VDI into the proper path. LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref, instance=instance) args = {} args['vdi-ref'] = vdi_ref # Let the plugin copy the correct number of bytes. args['image-size'] = str(vdi_size) if CONF.xenserver.cache_images: args['cached-image'] = image_id filename = session.call_plugin('kernel', 'copy_vdi', args) # Remove the VDI as it is not needed anymore. 
destroy_vdi(session, vdi_ref) LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref, instance=instance) vdi_role = ImageType.get_role(image_type) return {vdi_role: dict(uuid=None, file=filename)} else: vdi_role = ImageType.get_role(image_type) return {vdi_role: dict(uuid=vdi_uuid, file=None)} except (session.XenAPI.Failure, IOError, OSError) as e: # We look for XenAPI and OS failures. LOG.exception(_("Failed to fetch glance image"), instance=instance) e.args = e.args + ([dict(type=ImageType.to_string(image_type), uuid=vdi_uuid, file=filename)],) raise def determine_disk_image_type(image_meta): """Disk Image Types are used to determine where the kernel will reside within an image. To figure out which type we're dealing with, we use the following rules: 1. If we're using Glance, we can use the image_type field to determine the image_type 2. If we're not using Glance, then we need to deduce this based on whether a kernel_id is specified. """ if not image_meta or 'disk_format' not in image_meta: return None disk_format = image_meta['disk_format'] disk_format_map = { 'ami': ImageType.DISK, 'aki': ImageType.KERNEL, 'ari': ImageType.RAMDISK, 'raw': ImageType.DISK_RAW, 'vhd': ImageType.DISK_VHD, 'iso': ImageType.DISK_ISO, } try: image_type = disk_format_map[disk_format] except KeyError: raise exception.InvalidDiskFormat(disk_format=disk_format) image_ref = image_meta.get('id') params = { 'image_type_str': ImageType.to_string(image_type), 'image_ref': image_ref } LOG.debug(_("Detected %(image_type_str)s format for image %(image_ref)s"), params) return image_type def determine_vm_mode(instance, disk_image_type): current_mode = vm_mode.get_from_instance(instance) if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM: return current_mode os_type = instance['os_type'] if os_type == "linux": return vm_mode.XEN if os_type == "windows": return vm_mode.HVM # disk_image_type specific default for backwards compatibility if disk_image_type == ImageType.DISK_VHD or \ disk_image_type == ImageType.DISK: return vm_mode.XEN # most images run OK as HVM return vm_mode.HVM def set_vm_name_label(session, vm_ref, name_label): session.call_xenapi("VM.set_name_label", vm_ref, name_label) def list_vms(session): vms = session.call_xenapi("VM.get_all_records_where", 'field "is_control_domain"="false" and ' 'field "is_a_template"="false" and ' 'field "resident_on"="%s"' % session.host_ref) for vm_ref in vms.keys(): yield vm_ref, vms[vm_ref] def lookup_vm_vdis(session, vm_ref): """Look for the VDIs that are attached to the VM.""" # Firstly we get the VBDs, then the VDIs. # TODO(Armando): do we leave the read-only devices? vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref) vdi_refs = [] if vbd_refs: for vbd_ref in vbd_refs: try: vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref) # Test valid VDI vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref) LOG.debug(_('VDI %s is still available'), vdi_uuid) vbd_other_config = session.call_xenapi("VBD.get_other_config", vbd_ref) if not vbd_other_config.get('osvol'): # This is not an attached volume vdi_refs.append(vdi_ref) except session.XenAPI.Failure as exc: LOG.exception(exc) return vdi_refs def lookup(session, name_label, check_rescue=False): """Look the instance up and return it if available. 
:param check_rescue: if True, will return the 'name'-rescue vm if it exists, instead of just 'name' """ if check_rescue: result = lookup(session, name_label + '-rescue', False) if result: return result vm_refs = session.call_xenapi("VM.get_by_name_label", name_label) n = len(vm_refs) if n == 0: return None elif n > 1: raise exception.InstanceExists(name=name_label) else: return vm_refs[0] def preconfigure_instance(session, instance, vdi_ref, network_info): """Makes alterations to the image before launching as part of spawn. """ # As mounting the image VDI is expensive, we only want to do it once, # if at all, so determine whether it's required first, and then do # everything mount_required = False key, net, metadata = _prepare_injectables(instance, network_info) mount_required = key or net or metadata if not mount_required: return with vdi_attached_here(session, vdi_ref, read_only=False) as dev: _mounted_processing(dev, key, net, metadata) def lookup_kernel_ramdisk(session, vm): vm_rec = session.call_xenapi("VM.get_record", vm) if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec: return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk']) else: return (None, None) def is_snapshot(session, vm): vm_rec = session.call_xenapi("VM.get_record", vm) if 'is_a_template' in vm_rec and 'is_a_snapshot' in vm_rec: return vm_rec['is_a_template'] and vm_rec['is_a_snapshot'] else: return False def get_power_state(session, vm_ref): xapi_state = session.call_xenapi("VM.get_power_state", vm_ref) return XENAPI_POWER_STATE[xapi_state] def compile_info(session, vm_ref): """Fill record with VM status information.""" power_state = get_power_state(session, vm_ref) max_mem = session.call_xenapi("VM.get_memory_static_max", vm_ref) mem = session.call_xenapi("VM.get_memory_dynamic_max", vm_ref) num_cpu = session.call_xenapi("VM.get_VCPUs_max", vm_ref) return {'state': power_state, 'max_mem': long(max_mem) >> 10, 'mem': long(mem) >> 10, 'num_cpu': num_cpu, 'cpu_time': 0} def compile_diagnostics(record): """Compile VM diagnostics data.""" try: keys = [] diags = {} vm_uuid = record["uuid"] xml = _get_rrd(_get_rrd_server(), vm_uuid) if xml: rrd = xmlutils.safe_minidom_parse_string(xml) for i, node in enumerate(rrd.firstChild.childNodes): # Provide the last update of the information if node.localName == 'lastupdate': diags['last_update'] = node.firstChild.data # Create a list of the diagnostic keys (in their order) if node.localName == 'ds': ref = node.childNodes # Name and Value if len(ref) > 6: keys.append(ref[0].firstChild.data) # Read the last row of the first RRA to get the latest info if node.localName == 'rra': rows = node.childNodes[4].childNodes last_row = rows[rows.length - 1].childNodes for j, value in enumerate(last_row): diags[keys[j]] = value.firstChild.data break return diags except expat.ExpatError as e: LOG.exception(_('Unable to parse rrd of %s'), e) return {"Unable to retrieve diagnostics": e} def fetch_bandwidth(session): bw = session.call_plugin_serialized('bandwidth', 'fetch_all_bandwidth') return bw def _scan_sr(session, sr_ref=None, max_attempts=4): if sr_ref: # NOTE(johngarbutt) xenapi will collapse any duplicate requests # for SR.scan if there is already a scan in progress. # However, we don't want that, because the scan may have started # before we modified the underlying VHDs on disk through a plugin. # Using our own mutex will reduce cases where our periodic SR scan # in host.update_status starts racing the sr.scan after a plugin call. 
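# NOTE(editor): illustrative sketch, not part of the original module.
# utils.synchronized() serialises callers that share a lock name, so
# keying the lock on the SR ref (as do_scan does below) lets different
# SRs scan concurrently while repeat scans of one SR queue up. The ref
# in this example is hypothetical and fixed:
@utils.synchronized('sr-scan-OpaqueRef:1234')
def _sketch_scan_once():
    pass  # only one greenthread may execute this body at a time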
@utils.synchronized('sr-scan-' + sr_ref) def do_scan(sr_ref): LOG.debug(_("Scanning SR %s"), sr_ref) attempt = 1 while True: try: return session.call_xenapi('SR.scan', sr_ref) except session.XenAPI.Failure as exc: with excutils.save_and_reraise_exception() as ctxt: if exc.details[0] == 'SR_BACKEND_FAILURE_40': if attempt < max_attempts: ctxt.reraise = False LOG.warn(_("Retry SR scan due to error: %s") % exc) greenthread.sleep(2 ** attempt) attempt += 1 do_scan(sr_ref) def scan_default_sr(session): """Looks for the system default SR and triggers a re-scan.""" sr_ref = safe_find_sr(session) _scan_sr(session, sr_ref) return sr_ref def safe_find_sr(session): """Same as _find_sr except raises a NotFound exception if SR cannot be determined """ sr_ref = _find_sr(session) if sr_ref is None: raise exception.StorageRepositoryNotFound() return sr_ref def _find_sr(session): """Return the storage repository to hold VM images.""" host = session.host_ref try: tokens = CONF.xenserver.sr_matching_filter.split(':') filter_criteria = tokens[0] filter_pattern = tokens[1] except IndexError: # oops, flag is invalid LOG.warning(_("Flag sr_matching_filter '%s' does not respect " "formatting convention"), CONF.xenserver.sr_matching_filter) return None if filter_criteria == 'other-config': key, value = filter_pattern.split('=', 1) for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'): if not (key in sr_rec['other_config'] and sr_rec['other_config'][key] == value): continue for pbd_ref in sr_rec['PBDs']: pbd_rec = session.get_rec('PBD', pbd_ref) if pbd_rec and pbd_rec['host'] == host: return sr_ref elif filter_criteria == 'default-sr' and filter_pattern == 'true': pool_ref = session.call_xenapi('pool.get_all')[0] sr_ref = session.call_xenapi('pool.get_default_SR', pool_ref) if sr_ref: return sr_ref # No SR found! LOG.error(_("XenAPI is unable to find a Storage Repository to " "install guest instances on. Please check your " "configuration (e.g. 
set a default SR for the pool) " "and/or configure the flag 'sr_matching_filter'.")) return None def _safe_find_iso_sr(session): """Same as _find_iso_sr except raises a NotFound exception if SR cannot be determined """ sr_ref = _find_iso_sr(session) if sr_ref is None: raise exception.NotFound(_('Cannot find SR of content-type ISO')) return sr_ref def _find_iso_sr(session): """Return the storage repository to hold ISO images.""" host = session.host_ref for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'): LOG.debug(_("ISO: looking at SR %s"), sr_rec) if not sr_rec['content_type'] == 'iso': LOG.debug(_("ISO: not iso content")) continue if 'i18n-key' not in sr_rec['other_config']: LOG.debug(_("ISO: iso content_type, no 'i18n-key' key")) continue if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso': LOG.debug(_("ISO: iso content_type, i18n-key value not " "'local-storage-iso'")) continue LOG.debug(_("ISO: SR MATCHing our criteria")) for pbd_ref in sr_rec['PBDs']: LOG.debug(_("ISO: ISO, looking to see if it is host local")) pbd_rec = session.get_rec('PBD', pbd_ref) if not pbd_rec: LOG.debug(_("ISO: PBD %s disappeared"), pbd_ref) continue pbd_rec_host = pbd_rec['host'] LOG.debug(_("ISO: PBD matching, want %(pbd_rec)s, have %(host)s"), {'pbd_rec': pbd_rec, 'host': host}) if pbd_rec_host == host: LOG.debug(_("ISO: SR with local PBD")) return sr_ref return None def _get_rrd_server(): """Return server's scheme and address to use for retrieving RRD XMLs.""" xs_url = urlparse.urlparse(CONF.xenserver.connection_url) return [xs_url.scheme, xs_url.netloc] def _get_rrd(server, vm_uuid): """Return the VM RRD XML as a string.""" try: xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % ( server[0], CONF.xenserver.connection_username, CONF.xenserver.connection_password, server[1], vm_uuid)) return xml.read() except IOError: LOG.exception(_('Unable to obtain RRD XML for VM %(vm_uuid)s with ' 'server details: %(server)s.'), {'vm_uuid': vm_uuid, 'server': server}) return None def _get_all_vdis_in_sr(session, sr_ref): for vdi_ref in session.call_xenapi('SR.get_VDIs', sr_ref): vdi_rec = session.get_rec('VDI', vdi_ref) # Check to make sure the record still exists. It may have # been deleted between the get_all call and get_rec call if vdi_rec: yield vdi_ref, vdi_rec def get_instance_vdis_for_sr(session, vm_ref, sr_ref): """Return opaqueRef for all the vdis which live on sr.""" for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref): try: vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref) if sr_ref == session.call_xenapi('VDI.get_SR', vdi_ref): yield vdi_ref except session.XenAPI.Failure: continue def _get_vhd_parent_uuid(session, vdi_ref, vdi_rec=None): if vdi_rec is None: vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) if 'vhd-parent' not in vdi_rec['sm_config']: return None parent_uuid = vdi_rec['sm_config']['vhd-parent'] vdi_uuid = vdi_rec['uuid'] LOG.debug(_('VHD %(vdi_uuid)s has parent %(parent_uuid)s'), {'vdi_uuid': vdi_uuid, 'parent_uuid': parent_uuid}) return parent_uuid def _walk_vdi_chain(session, vdi_uuid): """Yield vdi_recs for each element in a VDI chain.""" scan_default_sr(session) while True: vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid) vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) yield vdi_rec parent_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec) if not parent_uuid: break vdi_uuid = parent_uuid def _child_vhds(session, sr_ref, vdi_uuid): """Return the immediate children of a given VHD. 
This is not recursive, only the immediate children are returned. """ children = set() for ref, rec in _get_all_vdis_in_sr(session, sr_ref): rec_uuid = rec['uuid'] if rec_uuid == vdi_uuid: continue parent_uuid = _get_vhd_parent_uuid(session, ref, rec) if parent_uuid != vdi_uuid: continue children.add(rec_uuid) return children def _another_child_vhd(session, vdi_ref, sr_ref, original_parent_uuid): # Search for any other vdi which parents to original parent and is not # in the active vm/instance vdi chain. vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref) vdi_uuid = vdi_rec['uuid'] parent_vdi_uuid = _get_vhd_parent_uuid(session, vdi_ref, vdi_rec) for _ref, rec in _get_all_vdis_in_sr(session, sr_ref): if ((rec['uuid'] != vdi_uuid) and (rec['uuid'] != parent_vdi_uuid) and (rec['sm_config'].get('vhd-parent') == original_parent_uuid)): # Found another vhd which too parents to original parent. return True # Found no other vdi with the same parent. return False def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref, original_parent_uuid): """Spin until the parent VHD is coalesced into its parent VHD Before coalesce: * original_parent_vhd * parent_vhd snapshot After coalesce: * parent_vhd snapshot """ # NOTE(sirp): If we don't have an original_parent_uuid, then the snapshot # doesn't have a grandparent to coalesce into, so we can skip waiting if not original_parent_uuid: return # Check if original parent has any other child. If so, coalesce will # not take place. if _another_child_vhd(session, vdi_ref, sr_ref, original_parent_uuid): parent_uuid = _get_vhd_parent_uuid(session, vdi_ref) parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid) base_uuid = _get_vhd_parent_uuid(session, parent_ref) return parent_uuid, base_uuid max_attempts = CONF.xenserver.vhd_coalesce_max_attempts for i in xrange(max_attempts): # NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config` # matches the underlying VHDs. _scan_sr(session, sr_ref) parent_uuid = _get_vhd_parent_uuid(session, vdi_ref) if parent_uuid and (parent_uuid != original_parent_uuid): LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent" " %(original_parent_uuid)s, waiting for coalesce..."), {'parent_uuid': parent_uuid, 'original_parent_uuid': original_parent_uuid}, instance=instance) else: parent_ref = session.call_xenapi("VDI.get_by_uuid", parent_uuid) base_uuid = _get_vhd_parent_uuid(session, parent_ref) return parent_uuid, base_uuid greenthread.sleep(CONF.xenserver.vhd_coalesce_poll_interval) msg = (_("VHD coalesce attempts exceeded (%d)" ", giving up...") % max_attempts) raise exception.NovaException(msg) def _remap_vbd_dev(dev): """Return the appropriate location for a plugged-in VBD device Ubuntu Maverick moved xvd? -> sd?. This is considered a bug and will be fixed in future versions: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/684875 For now, we work around it by just doing a string replace. 
""" # NOTE(sirp): This hack can go away when we pull support for Maverick should_remap = CONF.xenserver.remap_vbd_dev if not should_remap: return dev old_prefix = 'xvd' new_prefix = CONF.xenserver.remap_vbd_dev_prefix remapped_dev = dev.replace(old_prefix, new_prefix) return remapped_dev def _wait_for_device(dev): """Wait for device node to appear.""" for i in xrange(0, CONF.xenserver.block_device_creation_timeout): dev_path = utils.make_dev_path(dev) if os.path.exists(dev_path): return time.sleep(1) raise volume_utils.StorageError( _('Timeout waiting for device %s to be created') % dev) def cleanup_attached_vdis(session): """Unplug any instance VDIs left after an unclean restart.""" this_vm_ref = _get_this_vm_ref(session) vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref) for vbd_ref in vbd_refs: try: vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref) vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref) except session.XenAPI.Failure as e: if e.details[0] != 'HANDLE_INVALID': raise continue if 'nova_instance_uuid' in vdi_rec['other_config']: # Belongs to an instance and probably left over after an # unclean restart LOG.info(_('Disconnecting stale VDI %s from compute domU'), vdi_rec['uuid']) unplug_vbd(session, vbd_ref, this_vm_ref) destroy_vbd(session, vbd_ref) @contextlib.contextmanager def vdi_attached_here(session, vdi_ref, read_only=False): this_vm_ref = _get_this_vm_ref(session) vbd_ref = create_vbd(session, this_vm_ref, vdi_ref, 'autodetect', read_only=read_only, bootable=False) try: LOG.debug(_('Plugging VBD %s ... '), vbd_ref) session.VBD.plug(vbd_ref, this_vm_ref) try: LOG.debug(_('Plugging VBD %s done.'), vbd_ref) orig_dev = session.call_xenapi("VBD.get_device", vbd_ref) LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s'), {'vbd_ref': vbd_ref, 'orig_dev': orig_dev}) dev = _remap_vbd_dev(orig_dev) if dev != orig_dev: LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, ' 'remapping to %(dev)s'), {'vbd_ref': vbd_ref, 'dev': dev}) _wait_for_device(dev) yield dev finally: utils.execute('sync', run_as_root=True) LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref) unplug_vbd(session, vbd_ref, this_vm_ref) finally: try: destroy_vbd(session, vbd_ref) except volume_utils.StorageError: # destroy_vbd() will log error pass LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref) def _get_sys_hypervisor_uuid(): with file('/sys/hypervisor/uuid') as f: return f.readline().strip() def get_this_vm_uuid(session): if session and session.is_local_connection: # UUID is the control domain running on this host vms = session.call_xenapi("VM.get_all_records_where", 'field "is_control_domain"="true" and ' 'field "resident_on"="%s"' % session.host_ref) return vms[vms.keys()[0]]['uuid'] try: return _get_sys_hypervisor_uuid() except IOError: # Some guest kernels (without 5c13f8067745efc15f6ad0158b58d57c44104c25) # cannot read from uuid after a reboot. Fall back to trying xenstore. 
# See https://bugs.launchpad.net/ubuntu/+source/xen-api/+bug/1081182 domid, _ = utils.execute('xenstore-read', 'domid', run_as_root=True) vm_key, _ = utils.execute('xenstore-read', '/local/domain/%s/vm' % domid.strip(), run_as_root=True) return vm_key.strip()[4:] def _get_this_vm_ref(session): return session.call_xenapi("VM.get_by_uuid", get_this_vm_uuid(session)) def _get_partitions(dev): """Return partition information (num, size, type) for a device.""" dev_path = utils.make_dev_path(dev) out, _err = utils.execute('parted', '--script', '--machine', dev_path, 'unit s', 'print', run_as_root=True) lines = [line for line in out.split('\n') if line] partitions = [] LOG.debug(_("Partitions:")) for line in lines[2:]: num, start, end, size, ptype = line.split(':')[:5] start = int(start.rstrip('s')) end = int(end.rstrip('s')) size = int(size.rstrip('s')) LOG.debug(_(" %(num)s: %(ptype)s %(size)d sectors"), {'num': num, 'ptype': ptype, 'size': size}) partitions.append((num, start, size, ptype)) return partitions def _stream_disk(session, image_service_func, image_type, virtual_size, dev): offset = 0 if image_type == ImageType.DISK: offset = MBR_SIZE_BYTES _write_partition(session, virtual_size, dev) dev_path = utils.make_dev_path(dev) with utils.temporary_chown(dev_path): with open(dev_path, 'wb') as f: f.seek(offset) image_service_func(f) def _write_partition(session, virtual_size, dev): dev_path = utils.make_dev_path(dev) primary_first = MBR_SIZE_SECTORS primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d' ' to %(dev_path)s...'), {'primary_first': primary_first, 'primary_last': primary_last, 'dev_path': dev_path}) def execute(*cmd, **kwargs): return utils.execute(*cmd, **kwargs) _make_partition(session, dev, "%ds" % primary_first, "%ds" % primary_last) LOG.debug(_('Writing partition table %s done.'), dev_path) def _repair_filesystem(partition_path): # Exit Code 1 = File system errors corrected # 2 = File system errors corrected, system needs a reboot utils.execute('e2fsck', '-f', '-y', partition_path, run_as_root=True, check_exit_code=[0, 1, 2]) def _resize_part_and_fs(dev, start, old_sectors, new_sectors): """Resize partition and filesystem. This assumes we are dealing with a single primary partition and using ext3 or ext4. 
""" size = new_sectors - start end = new_sectors - 1 dev_path = utils.make_dev_path(dev) partition_path = utils.make_dev_path(dev, partition=1) # Replay journal if FS wasn't cleanly unmounted _repair_filesystem(partition_path) # Remove ext3 journal (making it ext2) utils.execute('tune2fs', '-O ^has_journal', partition_path, run_as_root=True) if new_sectors < old_sectors: # Resizing down, resize filesystem before partition resize try: utils.execute('resize2fs', partition_path, '%ds' % size, run_as_root=True) except processutils.ProcessExecutionError as exc: LOG.error(str(exc)) reason = _("Shrinking the filesystem down with resize2fs " "has failed, please check if you have " "enough free space on your disk.") raise exception.ResizeError(reason=reason) utils.execute('parted', '--script', dev_path, 'rm', '1', run_as_root=True) utils.execute('parted', '--script', dev_path, 'mkpart', 'primary', '%ds' % start, '%ds' % end, run_as_root=True) if new_sectors > old_sectors: # Resizing up, resize filesystem after partition resize utils.execute('resize2fs', partition_path, run_as_root=True) # Add back journal utils.execute('tune2fs', '-j', partition_path, run_as_root=True) def _log_progress_if_required(left, last_log_time, virtual_size): if timeutils.is_older_than(last_log_time, PROGRESS_INTERVAL_SECONDS): last_log_time = timeutils.utcnow() complete_pct = float(virtual_size - left) / virtual_size * 100 LOG.debug(_("Sparse copy in progress, " "%(complete_pct).2f%% complete. " "%(left)s bytes left to copy"), {"complete_pct": complete_pct, "left": left}) return last_log_time def _sparse_copy(src_path, dst_path, virtual_size, block_size=4096): """Copy data, skipping long runs of zeros to create a sparse file.""" start_time = last_log_time = timeutils.utcnow() EMPTY_BLOCK = '\0' * block_size bytes_read = 0 skipped_bytes = 0 left = virtual_size LOG.debug(_("Starting sparse_copy src=%(src_path)s dst=%(dst_path)s " "virtual_size=%(virtual_size)d block_size=%(block_size)d"), {'src_path': src_path, 'dst_path': dst_path, 'virtual_size': virtual_size, 'block_size': block_size}) # NOTE(sirp): we need read/write access to the devices; since we don't have # the luxury of shelling out to a sudo'd command, we temporarily take # ownership of the devices. 
with utils.temporary_chown(src_path): with utils.temporary_chown(dst_path): with open(src_path, "r") as src: with open(dst_path, "w") as dst: data = src.read(min(block_size, left)) while data: if data == EMPTY_BLOCK: dst.seek(block_size, os.SEEK_CUR) left -= block_size bytes_read += block_size skipped_bytes += block_size else: dst.write(data) data_len = len(data) left -= data_len bytes_read += data_len if left <= 0: break data = src.read(min(block_size, left)) greenthread.sleep(0) last_log_time = _log_progress_if_required( left, last_log_time, virtual_size) duration = timeutils.delta_seconds(start_time, timeutils.utcnow()) compression_pct = float(skipped_bytes) / bytes_read * 100 LOG.debug(_("Finished sparse_copy in %(duration).2f secs, " "%(compression_pct).2f%% reduction in size"), {'duration': duration, 'compression_pct': compression_pct}) def _copy_partition(session, src_ref, dst_ref, partition, virtual_size): # Part of disk taken up by MBR virtual_size -= MBR_SIZE_BYTES with vdi_attached_here(session, src_ref, read_only=True) as src: src_path = utils.make_dev_path(src, partition=partition) with vdi_attached_here(session, dst_ref, read_only=False) as dst: dst_path = utils.make_dev_path(dst, partition=partition) _write_partition(session, virtual_size, dst) if CONF.xenserver.sparse_copy: _sparse_copy(src_path, dst_path, virtual_size) else: num_blocks = virtual_size / SECTOR_SIZE utils.execute('dd', 'if=%s' % src_path, 'of=%s' % dst_path, 'count=%d' % num_blocks, 'iflag=direct,sync', 'oflag=direct,sync', run_as_root=True) def _mount_filesystem(dev_path, dir): """mounts the device specified by dev_path in dir.""" try: _out, err = utils.execute('mount', '-t', 'ext2,ext3,ext4,reiserfs', dev_path, dir, run_as_root=True) except processutils.ProcessExecutionError as e: err = str(e) return err def _mounted_processing(device, key, net, metadata): """Callback which runs with the image VDI attached.""" # NB: Partition 1 hardcoded dev_path = utils.make_dev_path(device, partition=1) with utils.tempdir() as tmpdir: # Mount only Linux filesystems, to avoid disturbing NTFS images err = _mount_filesystem(dev_path, tmpdir) if not err: try: # This try block ensures that the umount occurs if not agent.find_guest_agent(tmpdir): vfs = vfsimpl.VFSLocalFS(imgfile=None, imgfmt=None, imgdir=tmpdir) LOG.info(_('Manipulating interface files directly')) # for xenapi, we don't 'inject' admin_password here, # it's handled at instance startup time, nor do we # support injecting arbitrary files here. 
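# The two trailing None arguments line up with the admin_password
# and files parameters of inject_data_into_fs() in this release;
# both are deliberately left unset here, per the note above.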
disk.inject_data_into_fs(vfs, key, net, metadata, None, None) finally: utils.execute('umount', dev_path, run_as_root=True) else: LOG.info(_('Failed to mount filesystem (expected for ' 'non-linux instances): %s') % err) def _prepare_injectables(inst, network_info): """prepares the ssh key and the network configuration file to be injected into the disk image """ #do the import here - Jinja2 will be loaded only if injection is performed import jinja2 tmpl_path, tmpl_file = os.path.split(CONF.injected_network_template) env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path)) template = env.get_template(tmpl_file) metadata = inst['metadata'] key = str(inst['key_data']) net = None if network_info: ifc_num = -1 interfaces_info = [] for vif in network_info: ifc_num += 1 try: if not vif['network'].get_meta('injected'): # network is not specified injected continue except KeyError: # vif network is None continue # NOTE(tr3buchet): using all subnets in case dns is stored in a # subnet that isn't chosen as first v4 or v6 # subnet in the case where there is more than one # dns = list of address of each dns entry from each vif subnet dns = [ip['address'] for subnet in vif['network']['subnets'] for ip in subnet['dns']] dns = ' '.join(dns).strip() interface_info = {'name': 'eth%d' % ifc_num, 'address': '', 'netmask': '', 'gateway': '', 'broadcast': '', 'dns': dns or '', 'address_v6': '', 'netmask_v6': '', 'gateway_v6': '', 'use_ipv6': CONF.use_ipv6} # NOTE(tr3buchet): the original code used the old network_info # which only supported a single ipv4 subnet # (and optionally, a single ipv6 subnet). # I modified it to use the new network info model, # which adds support for multiple v4 or v6 # subnets. I chose to ignore any additional # subnets, just as the original code ignored # additional IP information # populate v4 info if v4 subnet and ip exist try: # grab the first v4 subnet (or it raises) subnet = [s for s in vif['network']['subnets'] if s['version'] == 4][0] # get the subnet's first ip (or it raises) ip = subnet['ips'][0] # populate interface_info subnet_netaddr = subnet.as_netaddr() interface_info['address'] = ip['address'] interface_info['netmask'] = subnet_netaddr.netmask interface_info['gateway'] = subnet['gateway']['address'] interface_info['broadcast'] = subnet_netaddr.broadcast except IndexError: # there isn't a v4 subnet or there are no ips pass # populate v6 info if v6 subnet and ip exist try: # grab the first v6 subnet (or it raises) subnet = [s for s in vif['network']['subnets'] if s['version'] == 6][0] # get the subnet's first ip (or it raises) ip = subnet['ips'][0] # populate interface_info interface_info['address_v6'] = ip['address'] interface_info['netmask_v6'] = subnet.as_netaddr().netmask interface_info['gateway_v6'] = subnet['gateway']['address'] except IndexError: # there isn't a v6 subnet or there are no ips pass interfaces_info.append(interface_info) if interfaces_info: net = template.render({'interfaces': interfaces_info, 'use_ipv6': CONF.use_ipv6}) return key, net, metadata def ensure_correct_host(session): """Ensure we're connected to the host we're running on. This is the required configuration for anything that uses vdi_attached_here. 
""" this_vm_uuid = get_this_vm_uuid(session) try: session.call_xenapi('VM.get_by_uuid', this_vm_uuid) except session.XenAPI.Failure as exc: if exc.details[0] != 'UUID_INVALID': raise raise Exception(_('This domU must be running on the host ' 'specified by connection_url')) def import_all_migrated_disks(session, instance): root_vdi = _import_migrated_root_disk(session, instance) eph_vdis = _import_migrate_ephemeral_disks(session, instance) return {'root': root_vdi, 'ephemerals': eph_vdis} def _import_migrated_root_disk(session, instance): chain_label = instance['uuid'] vdi_label = instance['name'] return _import_migrated_vhds(session, instance, chain_label, "root", vdi_label) def _import_migrate_ephemeral_disks(session, instance): ephemeral_vdis = {} instance_uuid = instance['uuid'] ephemeral_gb = instance["ephemeral_gb"] disk_sizes = get_ephemeral_disk_sizes(ephemeral_gb) for chain_number, _size in enumerate(disk_sizes, start=1): chain_label = instance_uuid + "_ephemeral_%d" % chain_number vdi_label = "%(name)s ephemeral (%(number)d)" % dict( name=instance['name'], number=chain_number) ephemeral_vdi = _import_migrated_vhds(session, instance, chain_label, "ephemeral", vdi_label) userdevice = 3 + chain_number ephemeral_vdis[str(userdevice)] = ephemeral_vdi return ephemeral_vdis def _import_migrated_vhds(session, instance, chain_label, disk_type, vdi_label): """Move and possibly link VHDs via the XAPI plugin.""" # TODO(johngarbutt) tidy up plugin params imported_vhds = session.call_plugin_serialized( 'migration', 'move_vhds_into_sr', instance_uuid=chain_label, sr_path=get_sr_path(session), uuid_stack=_make_uuid_stack()) # Now we rescan the SR so we find the VHDs scan_default_sr(session) vdi_uuid = imported_vhds['root']['uuid'] vdi_ref = session.call_xenapi('VDI.get_by_uuid', vdi_uuid) # Set name-label so we can find if we need to clean up a failed migration _set_vdi_info(session, vdi_ref, disk_type, vdi_label, disk_type, instance) return {'uuid': vdi_uuid, 'ref': vdi_ref} def migrate_vhd(session, instance, vdi_uuid, dest, sr_path, seq_num, ephemeral_number=0): LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"), {'vdi_uuid': vdi_uuid, 'seq_num': seq_num}, instance=instance) chain_label = instance['uuid'] if ephemeral_number: chain_label = instance['uuid'] + "_ephemeral_%d" % ephemeral_number try: # TODO(johngarbutt) tidy up plugin params session.call_plugin_serialized('migration', 'transfer_vhd', instance_uuid=chain_label, host=dest, vdi_uuid=vdi_uuid, sr_path=sr_path, seq_num=seq_num) except session.XenAPI.Failure: msg = _("Failed to transfer vhd to new host") LOG.debug(msg, instance=instance, exc_info=True) raise exception.MigrationError(reason=msg) def vm_ref_or_raise(session, instance_name): vm_ref = lookup(session, instance_name) if vm_ref is None: raise exception.InstanceNotFound(instance_id=instance_name) return vm_ref def handle_ipxe_iso(session, instance, cd_vdi, network_info): """iPXE ISOs are a mechanism to allow the customer to roll their own image. To use this feature, a service provider needs to configure the appropriate Nova flags, roll an iPXE ISO, then distribute that image to customers via Glance. NOTE: `mkisofs` is not present by default in the Dom0, so the service provider can either add that package manually to Dom0 or include the `mkisofs` binary in the image itself. 
""" boot_menu_url = CONF.xenserver.ipxe_boot_menu_url if not boot_menu_url: LOG.warn(_('ipxe_boot_menu_url not set, user will have to' ' enter URL manually...'), instance=instance) return network_name = CONF.xenserver.ipxe_network_name if not network_name: LOG.warn(_('ipxe_network_name not set, user will have to' ' enter IP manually...'), instance=instance) return network = None for vif in network_info: if vif['network']['label'] == network_name: network = vif['network'] break if not network: LOG.warn(_("Unable to find network matching '%(network_name)s', user" " will have to enter IP manually...") % {'network_name': network_name}, instance=instance) return sr_path = get_sr_path(session) # Unpack IPv4 network info subnet = [sn for sn in network['subnets'] if sn['version'] == 4][0] ip = subnet['ips'][0] ip_address = ip['address'] netmask = network_model.get_netmask(ip, subnet) gateway = subnet['gateway']['address'] dns = subnet['dns'][0]['address'] try: session.call_plugin_serialized("ipxe", "inject", sr_path, cd_vdi['uuid'], boot_menu_url, ip_address, netmask, gateway, dns, CONF.xenserver.ipxe_mkisofs_cmd) except session.XenAPI.Failure as exc: _type, _method, error = exc.details[:3] if error == 'CommandNotFound': LOG.warn(_("ISO creation tool '%s' does not exist.") % CONF.xenserver.ipxe_mkisofs_cmd, instance=instance) else: raise def set_other_config_pci(session, vm_ref, params): """Set the pci key of other-config parameter to params.""" other_config = session.call_xenapi("VM.get_other_config", vm_ref) other_config['pci'] = params session.call_xenapi("VM.set_other_config", vm_ref, other_config) nova-2014.1.5/nova/virt/xenapi/driver.py0000664000567000056700000007247512540642544021154 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A driver for XenServer or Xen Cloud Platform. **Related Flags** :connection_url: URL for connection to XenServer/Xen Cloud Platform. :connection_username: Username for connection to XenServer/Xen Cloud Platform (default: root). :connection_password: Password for connection to XenServer/Xen Cloud Platform. :target_host: the iSCSI Target Host IP address, i.e. the IP address for the nova-volume host :target_port: iSCSI Target Port, 3260 Default :iqn_prefix: IQN Prefix, e.g. 
'iqn.2010-10.org.openstack' **Variable Naming Scheme** - suffix "_ref" for opaque references - suffix "_uuid" for UUIDs - suffix "_rec" for record objects """ import math from oslo.config import cfg import six.moves.urllib.parse as urlparse from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import units from nova import utils from nova.virt import driver from nova.virt.xenapi.client import session from nova.virt.xenapi import host from nova.virt.xenapi import pool from nova.virt.xenapi import vm_utils from nova.virt.xenapi import vmops from nova.virt.xenapi import volumeops LOG = logging.getLogger(__name__) xenapi_opts = [ cfg.StrOpt('connection_url', deprecated_name='xenapi_connection_url', deprecated_group='DEFAULT', help='URL for connection to XenServer/Xen Cloud Platform. ' 'A special value of unix://local can be used to connect ' 'to the local unix socket. ' 'Required if compute_driver=xenapi.XenAPIDriver'), cfg.StrOpt('connection_username', default='root', deprecated_name='xenapi_connection_username', deprecated_group='DEFAULT', help='Username for connection to XenServer/Xen Cloud Platform. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), cfg.StrOpt('connection_password', deprecated_name='xenapi_connection_password', deprecated_group='DEFAULT', help='Password for connection to XenServer/Xen Cloud Platform. ' 'Used only if compute_driver=xenapi.XenAPIDriver', secret=True), cfg.FloatOpt('vhd_coalesce_poll_interval', default=5.0, deprecated_name='xenapi_vhd_coalesce_poll_interval', deprecated_group='DEFAULT', help='The interval used for polling of coalescing vhds. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), cfg.BoolOpt('check_host', default=True, deprecated_name='xenapi_check_host', deprecated_group='DEFAULT', help='Ensure compute service is running on host XenAPI ' 'connects to.'), cfg.IntOpt('vhd_coalesce_max_attempts', default=20, deprecated_name='xenapi_vhd_coalesce_max_attempts', deprecated_group='DEFAULT', help='Max number of times to poll for VHD to coalesce. ' 'Used only if compute_driver=xenapi.XenAPIDriver'), cfg.StrOpt('sr_base_path', default='/var/run/sr-mount', deprecated_name='xenapi_sr_base_path', deprecated_group='DEFAULT', help='Base path to the storage repository'), cfg.StrOpt('target_host', deprecated_name='target_host', deprecated_group='DEFAULT', help='The iSCSI Target Host'), cfg.StrOpt('target_port', default='3260', deprecated_name='target_port', deprecated_group='DEFAULT', help='The iSCSI Target Port, default is port 3260'), cfg.StrOpt('iqn_prefix', default='iqn.2010-10.org.openstack', deprecated_name='iqn_prefix', deprecated_group='DEFAULT', help='IQN Prefix'), # NOTE(sirp): This is a work-around for a bug in Ubuntu Maverick, # when we pull support for it, we should remove this cfg.BoolOpt('remap_vbd_dev', default=False, deprecated_name='xenapi_remap_vbd_dev', deprecated_group='DEFAULT', help='Used to enable the remapping of VBD dev ' '(Works around an issue in Ubuntu Maverick)'), cfg.StrOpt('remap_vbd_dev_prefix', default='sd', deprecated_name='xenapi_remap_vbd_dev_prefix', deprecated_group='DEFAULT', help='Specify prefix to remap VBD dev to ' '(ex. 
/dev/xvdb -> /dev/sdb)'), ] CONF = cfg.CONF # xenapi options in the DEFAULT group were deprecated in Icehouse CONF.register_opts(xenapi_opts, 'xenserver') CONF.import_opt('host', 'nova.netconf') OVERHEAD_BASE = 3 OVERHEAD_PER_MB = 0.00781 OVERHEAD_PER_VCPU = 1.5 class XenAPIDriver(driver.ComputeDriver): """A connection to XenServer or Xen Cloud Platform.""" def __init__(self, virtapi, read_only=False): super(XenAPIDriver, self).__init__(virtapi) url = CONF.xenserver.connection_url username = CONF.xenserver.connection_username password = CONF.xenserver.connection_password if not url or password is None: raise Exception(_('Must specify connection_url, ' 'connection_username (optionally), and ' 'connection_password to use ' 'compute_driver=xenapi.XenAPIDriver')) self._session = session.XenAPISession(url, username, password) self._volumeops = volumeops.VolumeOps(self._session) self._host_state = None self._host = host.Host(self._session, self.virtapi) self._vmops = vmops.VMOps(self._session, self.virtapi) self._initiator = None self._hypervisor_hostname = None self._pool = pool.ResourcePool(self._session, self.virtapi) @property def host_state(self): if not self._host_state: self._host_state = host.HostState(self._session) return self._host_state def init_host(self, host): if CONF.xenserver.check_host: vm_utils.ensure_correct_host(self._session) try: vm_utils.cleanup_attached_vdis(self._session) except Exception: LOG.exception(_('Failure while cleaning up attached VDIs')) def instance_exists(self, instance_name): """Checks existence of an instance on the host. :param instance_name: The name of the instance to lookup Returns True if an instance with the supplied name exists on the host, False otherwise. NOTE(belliott): This is an override of the base method for efficiency. """ return self._vmops.instance_exists(instance_name) def estimate_instance_overhead(self, instance_info): """Get virtualization overhead required to build an instance of the given flavor. :param instance_info: Instance/flavor to calculate overhead for. :returns: Overhead memory in MB. """ # XenServer memory overhead is proportional to the size of the # VM. Larger flavor VMs become more efficient with respect to # overhead. # interpolated formula to predict overhead required per vm. # based on data from: # https://wiki.openstack.org/wiki/XenServer/Overhead # Some padding is done to each value to fit all available VM data memory_mb = instance_info['memory_mb'] vcpus = instance_info.get('vcpus', 1) overhead = ((memory_mb * OVERHEAD_PER_MB) + (vcpus * OVERHEAD_PER_VCPU) + OVERHEAD_BASE) overhead = math.ceil(overhead) return {'memory_mb': overhead} def list_instances(self): """List VM instances.""" return self._vmops.list_instances() def list_instance_uuids(self): """Get the list of nova instance uuids for VMs found on the hypervisor. 
""" return self._vmops.list_instance_uuids() def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): """Create VM instance.""" self._vmops.spawn(context, instance, image_meta, injected_files, admin_password, network_info, block_device_info) def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM.""" # TODO(Vek): Need to pass context in for access to auth_token self._vmops.confirm_migration(migration, instance, network_info) def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): """Finish reverting a resize.""" # NOTE(vish): Xen currently does not use network info. self._vmops.finish_revert_migration(context, instance, block_device_info, power_on) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance=False, block_device_info=None, power_on=True): """Completes a resize, turning on the migrated instance.""" self._vmops.finish_migration(context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info, power_on) def snapshot(self, context, instance, image_id, update_task_state): """Create snapshot from a running VM instance.""" self._vmops.snapshot(context, instance, image_id, update_task_state) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): """Reboot VM instance.""" self._vmops.reboot(instance, reboot_type, bad_volumes_callback=bad_volumes_callback) def set_admin_password(self, instance, new_pass): """Set the root/admin password on the VM instance.""" self._vmops.set_admin_password(instance, new_pass) def inject_file(self, instance, b64_path, b64_contents): """Create a file on the VM instance. The file path and contents should be base64-encoded. """ self._vmops.inject_file(instance, b64_path, b64_contents) def change_instance_metadata(self, context, instance, diff): """Apply a diff to the instance metadata.""" self._vmops.change_instance_metadata(instance, diff) def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): """Destroy VM instance.""" self._vmops.destroy(instance, network_info, block_device_info, destroy_disks) def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True): """Cleanup after instance being destroyed by Hypervisor.""" pass def pause(self, instance): """Pause VM instance.""" self._vmops.pause(instance) def unpause(self, instance): """Unpause paused VM instance.""" self._vmops.unpause(instance) def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None): """Transfers the VHD of a running instance to another host, then shuts off the instance copies over the COW disk """ # NOTE(vish): Xen currently does not use network info. 
return self._vmops.migrate_disk_and_power_off(context, instance, dest, flavor, block_device_info) def suspend(self, instance): """suspend the specified instance.""" self._vmops.suspend(instance) def resume(self, context, instance, network_info, block_device_info=None): """resume the specified instance.""" self._vmops.resume(instance) def rescue(self, context, instance, network_info, image_meta, rescue_password): """Rescue the specified instance.""" self._vmops.rescue(context, instance, network_info, image_meta, rescue_password) def set_bootable(self, instance, is_bootable): """Set the ability to power on/off an instance.""" self._vmops.set_bootable(instance, is_bootable) def unrescue(self, instance, network_info): """Unrescue the specified instance.""" self._vmops.unrescue(instance) def power_off(self, instance): """Power off the specified instance.""" self._vmops.power_off(instance) def power_on(self, context, instance, network_info, block_device_info=None): """Power on the specified instance.""" self._vmops.power_on(instance) def soft_delete(self, instance): """Soft delete the specified instance.""" self._vmops.soft_delete(instance) def restore(self, instance): """Restore the specified instance.""" self._vmops.restore(instance) def poll_rebooting_instances(self, timeout, instances): """Poll for rebooting instances.""" self._vmops.poll_rebooting_instances(timeout, instances) def reset_network(self, instance): """reset networking for specified instance.""" self._vmops.reset_network(instance) def inject_network_info(self, instance, network_info): """inject network info for specified instance.""" self._vmops.inject_network_info(instance, network_info) def plug_vifs(self, instance_ref, network_info): """Plug VIFs into networks.""" self._vmops.plug_vifs(instance_ref, network_info) def unplug_vifs(self, instance_ref, network_info): """Unplug VIFs from networks.""" self._vmops.unplug_vifs(instance_ref, network_info) def get_info(self, instance): """Return data about VM instance.""" return self._vmops.get_info(instance) def get_diagnostics(self, instance): """Return data about VM diagnostics.""" return self._vmops.get_diagnostics(instance) def get_all_bw_counters(self, instances): """Return bandwidth usage counters for each interface on each running VM. """ # we only care about VMs that correspond to a nova-managed # instance: imap = dict([(inst['name'], inst['uuid']) for inst in instances]) bwcounters = [] # get a dictionary of instance names. values are dictionaries # of mac addresses with values that are the bw counters: # e.g. 
{'instance-001' : { 12:34:56:78:90:12 : {'bw_in': 0, ....}} all_counters = self._vmops.get_all_bw_counters() for instance_name, counters in all_counters.iteritems(): if instance_name in imap: # yes these are stats for a nova-managed vm # correlate the stats with the nova instance uuid: for vif_counter in counters.values(): vif_counter['uuid'] = imap[instance_name] bwcounters.append(vif_counter) return bwcounters def get_console_output(self, context, instance): """Return snapshot of console.""" return self._vmops.get_console_output(instance) def get_vnc_console(self, context, instance): """Return link to instance's VNC console.""" return self._vmops.get_vnc_console(instance) def get_volume_connector(self, instance): """Return volume connector information.""" if not self._initiator or not self._hypervisor_hostname: stats = self.get_host_stats(refresh=True) try: self._initiator = stats['host_other-config']['iscsi_iqn'] self._hypervisor_hostname = stats['host_hostname'] except (TypeError, KeyError) as err: LOG.warn(_('Could not determine key: %s') % err, instance=instance) self._initiator = None return { 'ip': self.get_host_ip_addr(), 'initiator': self._initiator, 'host': self._hypervisor_hostname } @staticmethod def get_host_ip_addr(): xs_url = urlparse.urlparse(CONF.xenserver.connection_url) return xs_url.netloc def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): """Attach volume storage to VM instance.""" return self._volumeops.attach_volume(connection_info, instance['name'], mountpoint) def detach_volume(self, connection_info, instance, mountpoint, encryption=None): """Detach volume storage from VM instance.""" return self._volumeops.detach_volume(connection_info, instance['name'], mountpoint) def get_console_pool_info(self, console_type): xs_url = urlparse.urlparse(CONF.xenserver.connection_url) return {'address': xs_url.netloc, 'username': CONF.xenserver.connection_username, 'password': CONF.xenserver.connection_password} def get_available_resource(self, nodename): """Retrieve resource information. This method is called when nova-compute launches, and as part of a periodic task that records the results in the DB. :param nodename: ignored in this driver :returns: dictionary describing resources """ host_stats = self.get_host_stats(refresh=True) # Updating host information total_ram_mb = host_stats['host_memory_total'] / units.Mi # NOTE(belliott) memory-free-computed is a value provided by XenServer # for gauging free memory more conservatively than memory-free. 
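# The host_memory_* and disk_* stats are byte counts; units.Mi (2**20) and
# units.Gi (2**30) convert them to the MB/GB figures expected by the
# resource tracker, e.g. 8589934592 bytes / units.Gi == 8 GB.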
free_ram_mb = host_stats['host_memory_free_computed'] / units.Mi total_disk_gb = host_stats['disk_total'] / units.Gi used_disk_gb = host_stats['disk_used'] / units.Gi hyper_ver = utils.convert_version_to_int(self._session.product_version) dic = {'vcpus': host_stats['host_cpu_info']['cpu_count'], 'memory_mb': total_ram_mb, 'local_gb': total_disk_gb, 'vcpus_used': host_stats['vcpus_used'], 'memory_mb_used': total_ram_mb - free_ram_mb, 'local_gb_used': used_disk_gb, 'hypervisor_type': 'xen', 'hypervisor_version': hyper_ver, 'hypervisor_hostname': host_stats['host_hostname'], # Todo(bobba) cpu_info may be in a format not supported by # arch_filter.py - see libvirt/driver.py get_cpu_info 'cpu_info': jsonutils.dumps(host_stats['host_cpu_info']), 'supported_instances': jsonutils.dumps( host_stats['supported_instances']), 'pci_passthrough_devices': jsonutils.dumps( host_stats['pci_passthrough_devices'])} return dic def ensure_filtering_rules_for_instance(self, instance_ref, network_info): # NOTE(salvatore-orlando): it enforces security groups on # host initialization and live migration. # In XenAPI we do not assume instances running upon host initialization return def check_can_live_migrate_destination(self, ctxt, instance_ref, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): """Check if it is possible to execute live migration. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit """ return self._vmops.check_can_live_migrate_destination(ctxt, instance_ref, block_migration, disk_over_commit) def check_can_live_migrate_destination_cleanup(self, ctxt, dest_check_data): """Do required cleanup on dest host after check_can_live_migrate calls :param ctxt: security context :param disk_over_commit: if true, allow disk over commit """ pass def check_can_live_migrate_source(self, ctxt, instance_ref, dest_check_data, block_device_info=None): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance :param dest_check_data: result of check_can_live_migrate_destination includes the block_migration flag :param block_device_info: result of _get_instance_block_device_info """ return self._vmops.check_can_live_migrate_source(ctxt, instance_ref, dest_check_data) def get_instance_disk_info(self, instance_name): """Used by libvirt for live migration. We rely on xenapi checks to do this for us. """ pass def live_migration(self, ctxt, instance_ref, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Performs the live migration of the specified instance. :param ctxt: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param post_method: post operation method. expected nova.compute.manager.post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :param block_migration: if true, migrate VM disk. 
:param migrate_data: implementation specific params """ self._vmops.live_migrate(ctxt, instance_ref, dest, post_method, recover_method, block_migration, migrate_data) def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info): # NOTE(johngarbutt) Destroying the VM is not appropriate here # and in the cases where it might make sense, # XenServer has already done it. # TODO(johngarbutt) investigate if any cleanup is required here pass def pre_live_migration(self, context, instance_ref, block_device_info, network_info, data, migrate_data=None): """Preparation live migration. :param block_device_info: It must be the result of _get_instance_volume_bdms() at compute manager. """ # TODO(JohnGarbutt) look again when boot-from-volume hits trunk pre_live_migration_result = {} pre_live_migration_result['sr_uuid_map'] = \ self._vmops.attach_block_device_volumes(block_device_info) return pre_live_migration_result def post_live_migration(self, ctxt, instance_ref, block_device_info, migrate_data=None): """Post operation of live migration at source host. :param ctxt: security context :instance_ref: instance object that was migrated :block_device_info: instance block device information :param migrate_data: if not None, it is a dict which has data """ self._vmops.post_live_migration(ctxt, instance_ref, migrate_data) def post_live_migration_at_destination(self, ctxt, instance_ref, network_info, block_migration, block_device_info=None): """Post operation of live migration at destination host. :param ctxt: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param network_info: instance network information :param : block_migration: if true, post operation of block_migration. """ self._vmops.post_live_migration_at_destination(ctxt, instance_ref, network_info, block_device_info, block_device_info) def unfilter_instance(self, instance_ref, network_info): """Removes security groups configured for an instance.""" return self._vmops.unfilter_instance(instance_ref, network_info) def refresh_security_group_rules(self, security_group_id): """Updates security group rules for all instances associated with a given security group. Invoked when security group rules are updated. """ return self._vmops.refresh_security_group_rules(security_group_id) def refresh_security_group_members(self, security_group_id): """Updates security group rules for all instances associated with a given security group. Invoked when instances are added/removed to a security group. """ return self._vmops.refresh_security_group_members(security_group_id) def refresh_instance_security_rules(self, instance): """Updates security group rules for specified instance. Invoked when instances are added/removed to a security group or when a rule is added/removed to a security group. """ return self._vmops.refresh_instance_security_rules(instance) def refresh_provider_fw_rules(self): return self._vmops.refresh_provider_fw_rules() def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run the update first. """ return self.host_state.get_host_stats(refresh=refresh) def host_power_action(self, host, action): """The only valid values for 'action' on XenServer are 'reboot' or 'shutdown', even though the API also accepts 'startup'. As this is not technically possible on XenServer, since the host is the same physical machine as the hypervisor, if this is requested, we need to raise an exception. 
""" if action in ("reboot", "shutdown"): return self._host.host_power_action(host, action) else: msg = _("Host startup on XenServer is not supported.") raise NotImplementedError(msg) def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" return self._host.set_host_enabled(host, enabled) def get_host_uptime(self, host): """Returns the result of calling "uptime" on the target host.""" return self._host.get_host_uptime(host) def host_maintenance_mode(self, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. """ return self._host.host_maintenance_mode(host, mode) def add_to_aggregate(self, context, aggregate, host, **kwargs): """Add a compute host to an aggregate.""" return self._pool.add_to_aggregate(context, aggregate, host, **kwargs) def remove_from_aggregate(self, context, aggregate, host, **kwargs): """Remove a compute host from an aggregate.""" return self._pool.remove_from_aggregate(context, aggregate, host, **kwargs) def undo_aggregate_operation(self, context, op, aggregate, host, set_error=True): """Undo aggregate operation when pool error raised.""" return self._pool.undo_aggregate_operation(context, op, aggregate, host, set_error) def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """resume guest state when a host is booted.""" self._vmops.power_on(instance) def get_per_instance_usage(self): """Get information about instance resource usage. :returns: dict of nova uuid => dict of usage info """ return self._vmops.get_per_instance_usage() nova-2014.1.5/nova/virt/xenapi/volume_utils.py0000664000567000056700000002471012540642544022375 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Helper methods for operations related to the management of volumes, and storage repositories """ import re import string from eventlet import greenthread from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging xenapi_volume_utils_opts = [ cfg.IntOpt('introduce_vdi_retry_wait', default=20, help='Number of seconds to wait for an SR to settle ' 'if the VDI does not exist when first introduced'), ] CONF = cfg.CONF CONF.register_opts(xenapi_volume_utils_opts, 'xenserver') LOG = logging.getLogger(__name__) class StorageError(Exception): """To raise errors related to SR, VDI, PBD, and VBD commands.""" def __init__(self, message=None): super(StorageError, self).__init__(message) def _handle_sr_params(params): if 'id' in params: del params['id'] sr_type = params.pop('sr_type', 'iscsi') sr_desc = params.pop('name_description', '') return sr_type, sr_desc def create_sr(session, label, params): LOG.debug(_('Creating SR %s'), label) sr_type, sr_desc = _handle_sr_params(params) sr_ref = session.call_xenapi("SR.create", session.host_ref, params, '0', label, sr_desc, sr_type, '', False, {}) return sr_ref def introduce_sr(session, sr_uuid, label, params): LOG.debug(_('Introducing SR %s'), label) sr_type, sr_desc = _handle_sr_params(params) sr_ref = session.call_xenapi('SR.introduce', sr_uuid, label, sr_desc, sr_type, '', False, params) LOG.debug(_('Creating PBD for SR')) pbd_ref = create_pbd(session, sr_ref, params) LOG.debug(_('Plugging SR')) session.call_xenapi("PBD.plug", pbd_ref) session.call_xenapi("SR.scan", sr_ref) return sr_ref def forget_sr(session, sr_ref): """Forgets the storage repository without destroying the VDIs within.""" LOG.debug(_('Forgetting SR...')) unplug_pbds(session, sr_ref) session.call_xenapi("SR.forget", sr_ref) def find_sr_by_uuid(session, sr_uuid): """Return the storage repository given a uuid.""" try: return session.call_xenapi("SR.get_by_uuid", sr_uuid) except session.XenAPI.Failure as exc: if exc.details[0] == 'UUID_INVALID': return None raise def find_sr_from_vbd(session, vbd_ref): """Find the SR reference from the VBD reference.""" try: vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref) sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref) except session.XenAPI.Failure as exc: LOG.exception(exc) raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref) return sr_ref def create_pbd(session, sr_ref, params): pbd_rec = {} pbd_rec['host'] = session.host_ref pbd_rec['SR'] = sr_ref pbd_rec['device_config'] = params pbd_ref = session.call_xenapi("PBD.create", pbd_rec) return pbd_ref def unplug_pbds(session, sr_ref): try: pbds = session.call_xenapi("SR.get_PBDs", sr_ref) except session.XenAPI.Failure as exc: LOG.warn(_('Ignoring exception %(exc)s when getting PBDs' ' for %(sr_ref)s'), {'exc': exc, 'sr_ref': sr_ref}) return for pbd in pbds: try: session.call_xenapi("PBD.unplug", pbd) except session.XenAPI.Failure as exc: LOG.warn(_('Ignoring exception %(exc)s when unplugging' ' PBD %(pbd)s'), {'exc': exc, 'pbd': pbd}) def _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun): if vdi_uuid: LOG.debug("vdi_uuid: %s" % vdi_uuid) return session.call_xenapi("VDI.get_by_uuid", vdi_uuid) elif target_lun: vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref) for curr_ref in vdi_refs: curr_rec = session.call_xenapi("VDI.get_record", curr_ref) if ('sm_config' in curr_rec and 'LUNid' in curr_rec['sm_config'] and curr_rec['sm_config']['LUNid'] == str(target_lun)): return curr_ref else: return 
(session.call_xenapi("SR.get_VDIs", sr_ref))[0] return None def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None): """Introduce VDI in the host.""" try: vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun) if vdi_ref is None: greenthread.sleep(CONF.xenserver.introduce_vdi_retry_wait) session.call_xenapi("SR.scan", sr_ref) vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun) except session.XenAPI.Failure as exc: LOG.exception(exc) raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref) if not vdi_ref: raise StorageError(_('VDI not found on SR %(sr)s (vdi_uuid ' '%(vdi_uuid)s, target_lun %(target_lun)s)') % {'sr': sr_ref, 'vdi_uuid': vdi_uuid, 'target_lun': target_lun}) try: vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) LOG.debug(vdi_rec) LOG.debug(type(vdi_rec)) except session.XenAPI.Failure as exc: LOG.exception(exc) raise StorageError(_('Unable to get record' ' of VDI %s on') % vdi_ref) if vdi_rec['managed']: # We do not need to introduce the vdi return vdi_ref try: return session.call_xenapi("VDI.introduce", vdi_rec['uuid'], vdi_rec['name_label'], vdi_rec['name_description'], vdi_rec['SR'], vdi_rec['type'], vdi_rec['sharable'], vdi_rec['read_only'], vdi_rec['other_config'], vdi_rec['location'], vdi_rec['xenstore_data'], vdi_rec['sm_config']) except session.XenAPI.Failure as exc: LOG.exception(exc) raise StorageError(_('Unable to introduce VDI for SR %s') % sr_ref) def purge_sr(session, sr_ref): # Make sure no VBDs are referencing the SR VDIs vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref) for vdi_ref in vdi_refs: vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref) if vbd_refs: LOG.warn(_('Cannot purge SR with referenced VDIs')) return forget_sr(session, sr_ref) def get_device_number(mountpoint): device_number = mountpoint_to_number(mountpoint) if device_number < 0: raise StorageError(_('Unable to obtain target information %s') % mountpoint) return device_number def parse_sr_info(connection_data, description=''): label = connection_data.pop('name_label', 'tempSR-%s' % connection_data.get('volume_id')) params = {} if 'sr_uuid' not in connection_data: params = parse_volume_info(connection_data) # This magic label sounds a lot like 'False Disc' in leet-speak uuid = "FA15E-D15C-" + str(params['id']) else: uuid = connection_data['sr_uuid'] for k in connection_data.get('introduce_sr_keys', {}): params[k] = connection_data[k] params['name_description'] = connection_data.get('name_description', description) return (uuid, label, params) def parse_volume_info(connection_data): """Parse device_path and mountpoint as they can be used by XenAPI. In particular, the mountpoint (e.g. /dev/sdc) must be translated into a numeric literal. 
""" volume_id = connection_data['volume_id'] target_portal = connection_data['target_portal'] target_host = _get_target_host(target_portal) target_port = _get_target_port(target_portal) target_iqn = connection_data['target_iqn'] log_params = { "vol_id": volume_id, "host": target_host, "port": target_port, "iqn": target_iqn } LOG.debug(_('(vol_id,host,port,iqn): ' '(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)'), log_params) if (volume_id is None or target_host is None or target_iqn is None): raise StorageError(_('Unable to obtain target information' ' %s') % connection_data) volume_info = {} volume_info['id'] = volume_id volume_info['target'] = target_host volume_info['port'] = target_port volume_info['targetIQN'] = target_iqn if ('auth_method' in connection_data and connection_data['auth_method'] == 'CHAP'): volume_info['chapuser'] = connection_data['auth_username'] volume_info['chappassword'] = connection_data['auth_password'] return volume_info def mountpoint_to_number(mountpoint): """Translate a mountpoint like /dev/sdc into a numeric.""" if mountpoint.startswith('/dev/'): mountpoint = mountpoint[5:] if re.match('^[hs]d[a-p]$', mountpoint): return (ord(mountpoint[2:3]) - ord('a')) elif re.match('^x?vd[a-p]$', mountpoint): return (ord(mountpoint[-1]) - ord('a')) elif re.match('^[0-9]+$', mountpoint): return string.atoi(mountpoint, 10) else: LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint) return -1 def _get_target_host(iscsi_string): """Retrieve target host.""" if iscsi_string: host = iscsi_string.split(':')[0] if len(host) > 0: return host return CONF.xenserver.target_host def _get_target_port(iscsi_string): """Retrieve target port.""" if iscsi_string and ':' in iscsi_string: return iscsi_string.split(':')[1] return CONF.xenserver.target_port nova-2014.1.5/nova/virt/xenapi/fake.py0000664000567000056700000011024412540642544020552 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # #============================================================================ # # Parts of this file are based upon xmlrpclib.py, the XML-RPC client # interface included in the Python distribution. # # Copyright (c) 1999-2002 by Secret Labs AB # Copyright (c) 1999-2002 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. 
# # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # -------------------------------------------------------------------- """ A fake XenAPI SDK. """ import base64 import pickle import random import uuid from xml.sax import saxutils import zlib import pprint from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.openstack.common import units from nova.virt.xenapi.client import session as xenapi_session _CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD', 'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task'] _db_content = {} LOG = logging.getLogger(__name__) def log_db_contents(msg=None): text = msg or "" content = pprint.pformat(_db_content) LOG.debug(_("%(text)s: _db_content => %(content)s"), {'text': text, 'content': content}) def reset(): for c in _CLASSES: _db_content[c] = {} host = create_host('fake') create_vm('fake dom 0', 'Running', is_a_template=False, is_control_domain=True, resident_on=host) def reset_table(table): if table not in _CLASSES: return _db_content[table] = {} def _create_pool(name_label): return _create_object('pool', {'name_label': name_label}) def create_host(name_label, hostname='fake_name', address='fake_addr'): host_ref = _create_object('host', {'name_label': name_label, 'hostname': hostname, 'address': address}) host_default_sr_ref = _create_local_srs(host_ref) _create_local_pif(host_ref) # Create a pool if we don't have one already if len(_db_content['pool']) == 0: pool_ref = _create_pool('') _db_content['pool'][pool_ref]['master'] = host_ref _db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref def create_network(name_label, bridge): return _create_object('network', {'name_label': name_label, 'bridge': bridge}) def create_vm(name_label, status, **kwargs): if status == 'Running': domid = random.randrange(1, 1 << 16) resident_on = _db_content['host'].keys()[0] else: domid = -1 resident_on = '' vm_rec = kwargs.copy() vm_rec.update({'name_label': name_label, 'domid': domid, 'power_state': status, 'blocked_operations': {}, 'resident_on': resident_on}) vm_ref = _create_object('VM', vm_rec) after_VM_create(vm_ref, vm_rec) return vm_ref def destroy_vm(vm_ref): vm_rec = _db_content['VM'][vm_ref] vbd_refs = vm_rec['VBDs'] # NOTE(johannes): Shallow copy since destroy_vbd will remove itself # from the list for vbd_ref in vbd_refs[:]: destroy_vbd(vbd_ref) del _db_content['VM'][vm_ref] def destroy_vbd(vbd_ref): vbd_rec = _db_content['VBD'][vbd_ref] vm_ref = vbd_rec['VM'] vm_rec = _db_content['VM'][vm_ref] vm_rec['VBDs'].remove(vbd_ref) vdi_ref = vbd_rec['VDI'] vdi_rec = _db_content['VDI'][vdi_ref] vdi_rec['VBDs'].remove(vbd_ref) del _db_content['VBD'][vbd_ref] def destroy_vdi(vdi_ref): vdi_rec = _db_content['VDI'][vdi_ref] vbd_refs = vdi_rec['VBDs'] # NOTE(johannes): Shallow copy since destroy_vbd will remove itself # from the list for vbd_ref in vbd_refs[:]: destroy_vbd(vbd_ref) 
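# Every VBD referencing this VDI has been destroyed by the loop above, so
# the VDI record itself can now be dropped from the fake in-memory database.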
del _db_content['VDI'][vdi_ref] def create_vdi(name_label, sr_ref, **kwargs): vdi_rec = { 'SR': sr_ref, 'read_only': False, 'type': '', 'name_label': name_label, 'name_description': '', 'sharable': False, 'other_config': {}, 'location': '', 'xenstore_data': {}, 'sm_config': {'vhd-parent': None}, 'physical_utilisation': '123', 'managed': True, } vdi_rec.update(kwargs) vdi_ref = _create_object('VDI', vdi_rec) after_VDI_create(vdi_ref, vdi_rec) return vdi_ref def after_VDI_create(vdi_ref, vdi_rec): vdi_rec.setdefault('VBDs', []) def create_vbd(vm_ref, vdi_ref, userdevice=0): vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref, 'userdevice': str(userdevice), 'currently_attached': False} vbd_ref = _create_object('VBD', vbd_rec) after_VBD_create(vbd_ref, vbd_rec) return vbd_ref def after_VBD_create(vbd_ref, vbd_rec): """Create read-only fields and backref from VM and VDI to VBD when VBD is created. """ vbd_rec['currently_attached'] = False vbd_rec['device'] = '' vm_ref = vbd_rec['VM'] vm_rec = _db_content['VM'][vm_ref] vm_rec['VBDs'].append(vbd_ref) vm_name_label = _db_content['VM'][vm_ref]['name_label'] vbd_rec['vm_name_label'] = vm_name_label vdi_ref = vbd_rec['VDI'] if vdi_ref and vdi_ref != "OpaqueRef:NULL": vdi_rec = _db_content['VDI'][vdi_ref] vdi_rec['VBDs'].append(vbd_ref) def after_VM_create(vm_ref, vm_rec): """Create read-only fields in the VM record.""" vm_rec.setdefault('domid', -1) vm_rec.setdefault('is_control_domain', False) vm_rec.setdefault('is_a_template', False) vm_rec.setdefault('memory_static_max', str(8 * units.Gi)) vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi)) vm_rec.setdefault('VCPUs_max', str(4)) vm_rec.setdefault('VBDs', []) vm_rec.setdefault('resident_on', '') def create_pbd(host_ref, sr_ref, attached): config = {'path': '/var/run/sr-mount/%s' % sr_ref} return _create_object('PBD', {'device_config': config, 'host': host_ref, 'SR': sr_ref, 'currently_attached': attached}) def create_task(name_label): return _create_object('task', {'name_label': name_label, 'status': 'pending'}) def _create_local_srs(host_ref): """Create an SR that looks like the one created on the local disk by default by the XenServer installer. Also, fake the installation of an ISO SR. 
""" create_sr(name_label='Local storage ISO', type='iso', other_config={'i18n-original-value-name_label': 'Local storage ISO', 'i18n-key': 'local-storage-iso'}, physical_size=80000, physical_utilisation=40000, virtual_allocation=80000, host_ref=host_ref) return create_sr(name_label='Local storage', type='ext', other_config={'i18n-original-value-name_label': 'Local storage', 'i18n-key': 'local-storage'}, physical_size=40000, physical_utilisation=20000, virtual_allocation=10000, host_ref=host_ref) def create_sr(**kwargs): sr_ref = _create_object( 'SR', {'name_label': kwargs.get('name_label'), 'type': kwargs.get('type'), 'content_type': kwargs.get('type', 'user'), 'shared': kwargs.get('shared', False), 'physical_size': kwargs.get('physical_size', str(1 << 30)), 'physical_utilisation': str( kwargs.get('physical_utilisation', 0)), 'virtual_allocation': str(kwargs.get('virtual_allocation', 0)), 'other_config': kwargs.get('other_config', {}), 'VDIs': kwargs.get('VDIs', [])}) pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True) _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] return sr_ref def _create_local_pif(host_ref): pif_ref = _create_object('PIF', {'name-label': 'Fake PIF', 'MAC': '00:11:22:33:44:55', 'physical': True, 'VLAN': -1, 'device': 'fake0', 'host_uuid': host_ref, 'network': '', 'IP': '10.1.1.1', 'IPv6': '', 'uuid': '', 'management': 'true'}) _db_content['PIF'][pif_ref]['uuid'] = pif_ref return pif_ref def _create_object(table, obj): ref = str(uuid.uuid4()) obj['uuid'] = str(uuid.uuid4()) _db_content[table][ref] = obj return ref def _create_sr(table, obj): sr_type = obj[6] # Forces fake to support iscsi only if sr_type != 'iscsi' and sr_type != 'nfs': raise Failure(['SR_UNKNOWN_DRIVER', sr_type]) host_ref = _db_content['host'].keys()[0] sr_ref = _create_object(table, obj[2]) if sr_type == 'iscsi': vdi_ref = create_vdi('', sr_ref) pbd_ref = create_pbd(host_ref, sr_ref, True) _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] _db_content['VDI'][vdi_ref]['SR'] = sr_ref _db_content['PBD'][pbd_ref]['SR'] = sr_ref return sr_ref def _create_vlan(pif_ref, vlan_num, network_ref): pif_rec = get_record('PIF', pif_ref) vlan_pif_ref = _create_object('PIF', {'name-label': 'Fake VLAN PIF', 'MAC': '00:11:22:33:44:55', 'physical': True, 'VLAN': vlan_num, 'device': pif_rec['device'], 'host_uuid': pif_rec['host_uuid']}) return _create_object('VLAN', {'tagged-pif': pif_ref, 'untagged-pif': vlan_pif_ref, 'tag': vlan_num}) def get_all(table): return _db_content[table].keys() def get_all_records(table): return _db_content[table] def _query_matches(record, query): # Simple support for the XenServer query language: # 'field "host"="" and field "SR"=""' # Tested through existing tests (e.g. calls to find_network_with_bridge) and_clauses = query.split(" and ") if len(and_clauses) > 1: matches = True for clause in and_clauses: matches = matches and _query_matches(record, clause) return matches or_clauses = query.split(" or ") if len(or_clauses) > 1: matches = False for clause in or_clauses: matches = matches or _query_matches(record, clause) return matches if query[:4] == 'not ': return not _query_matches(record, query[4:]) # Now it must be a single field - bad queries never match if query[:5] != 'field': return False (field, value) = query[6:].split('=', 1) # Some fields (e.g. 
name_label, memory_overhead) have double # underscores in the DB, but only single underscores when querying field = field.replace("__", "_").strip(" \"'") value = value.strip(" \"'") # Strings should be directly compared if isinstance(record[field], str): return record[field] == value # But for all other value-checks, convert to a string first # (Notably used for booleans - which can be lower or camel # case and are interpreted/sanitised by XAPI) return str(record[field]).lower() == value.lower() def get_all_records_where(table_name, query): matching_records = {} table = _db_content[table_name] for record in table: if _query_matches(table[record], query): matching_records[record] = table[record] return matching_records def get_record(table, ref): if ref in _db_content[table]: return _db_content[table].get(ref) else: raise Failure(['HANDLE_INVALID', table, ref]) def check_for_session_leaks(): if len(_db_content['session']) > 0: raise exception.NovaException('Sessions have leaked: %s' % _db_content['session']) def as_value(s): """Helper function for simulating XenAPI plugin responses. It escapes and wraps the given argument. """ return '<value>%s</value>' % saxutils.escape(s) def as_json(*args, **kwargs): """Helper function for simulating XenAPI plugin responses for those that are returning JSON. If this function is given plain arguments, then these are rendered as a JSON list. If it's given keyword arguments then these are rendered as a JSON dict. """ arg = args or kwargs return jsonutils.dumps(arg) class Failure(Exception): def __init__(self, details): self.details = details def __str__(self): try: return str(self.details) except Exception: return "XenAPI Fake Failure: %s" % str(self.details) def _details_map(self): return dict([(str(i), self.details[i]) for i in range(len(self.details))]) class SessionBase(object): """Base class for Fake Sessions.""" def __init__(self, uri): self._session = None xenapi_session.apply_session_helpers(self) def pool_get_default_SR(self, _1, pool_ref): return _db_content['pool'].values()[0]['default-SR'] def VBD_insert(self, _1, vbd_ref, vdi_ref): vbd_rec = get_record('VBD', vbd_ref) get_record('VDI', vdi_ref) vbd_rec['empty'] = False vbd_rec['VDI'] = vdi_ref def VBD_plug(self, _1, ref): rec = get_record('VBD', ref) if rec['currently_attached']: raise Failure(['DEVICE_ALREADY_ATTACHED', ref]) rec['currently_attached'] = True rec['device'] = rec['userdevice'] def VBD_unplug(self, _1, ref): rec = get_record('VBD', ref) if not rec['currently_attached']: raise Failure(['DEVICE_ALREADY_DETACHED', ref]) rec['currently_attached'] = False rec['device'] = '' def VBD_add_to_other_config(self, _1, vbd_ref, key, value): db_ref = _db_content['VBD'][vbd_ref] if 'other_config' not in db_ref: db_ref['other_config'] = {} if key in db_ref['other_config']: raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config', vbd_ref, key]) db_ref['other_config'][key] = value def VBD_get_other_config(self, _1, vbd_ref): db_ref = _db_content['VBD'][vbd_ref] if 'other_config' not in db_ref: return {} return db_ref['other_config'] def PBD_create(self, _1, pbd_rec): pbd_ref = _create_object('PBD', pbd_rec) _db_content['PBD'][pbd_ref]['currently_attached'] = False return pbd_ref def PBD_plug(self, _1, pbd_ref): rec = get_record('PBD', pbd_ref) if rec['currently_attached']: raise Failure(['DEVICE_ALREADY_ATTACHED', rec]) rec['currently_attached'] = True sr_ref = rec['SR'] _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] def PBD_unplug(self, _1, pbd_ref): rec = get_record('PBD', pbd_ref) if not 
rec['currently_attached']: raise Failure(['DEVICE_ALREADY_DETACHED', rec]) rec['currently_attached'] = False sr_ref = rec['SR'] _db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref) def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type, shared, sm_config): ref = None rec = None for ref, rec in _db_content['SR'].iteritems(): if rec.get('uuid') == sr_uuid: # make forgotten = 0 and return ref _db_content['SR'][ref]['forgotten'] = 0 return ref # SR not found in db, so we create one params = {'sr_uuid': sr_uuid, 'label': label, 'desc': desc, 'type': type, 'content_type': content_type, 'shared': shared, 'sm_config': sm_config} sr_ref = _create_object('SR', params) _db_content['SR'][sr_ref]['uuid'] = sr_uuid _db_content['SR'][sr_ref]['forgotten'] = 0 vdi_per_lun = False if type == 'iscsi': # Just to be clear vdi_per_lun = True if vdi_per_lun: # we need to create a vdi because this introduce # is likely meant for a single vdi vdi_ref = create_vdi('', sr_ref) _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] _db_content['VDI'][vdi_ref]['SR'] = sr_ref return sr_ref def SR_forget(self, _1, sr_ref): _db_content['SR'][sr_ref]['forgotten'] = 1 def SR_scan(self, _1, sr_ref): return def VM_get_xenstore_data(self, _1, vm_ref): return _db_content['VM'][vm_ref].get('xenstore_data', {}) def VM_remove_from_xenstore_data(self, _1, vm_ref, key): db_ref = _db_content['VM'][vm_ref] if 'xenstore_data' not in db_ref: return if key in db_ref['xenstore_data']: del db_ref['xenstore_data'][key] def VM_add_to_xenstore_data(self, _1, vm_ref, key, value): db_ref = _db_content['VM'][vm_ref] if 'xenstore_data' not in db_ref: db_ref['xenstore_data'] = {} db_ref['xenstore_data'][key] = value def VM_pool_migrate(self, _1, vm_ref, host_ref, options): pass def VDI_remove_from_other_config(self, _1, vdi_ref, key): db_ref = _db_content['VDI'][vdi_ref] if 'other_config' not in db_ref: return if key in db_ref['other_config']: del db_ref['other_config'][key] def VDI_add_to_other_config(self, _1, vdi_ref, key, value): db_ref = _db_content['VDI'][vdi_ref] if 'other_config' not in db_ref: db_ref['other_config'] = {} if key in db_ref['other_config']: raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config', vdi_ref, key]) db_ref['other_config'][key] = value def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref): db_ref = _db_content['VDI'][vdi_to_copy_ref] name_label = db_ref['name_label'] read_only = db_ref['read_only'] sharable = db_ref['sharable'] other_config = db_ref['other_config'].copy() return create_vdi(name_label, sr_ref, sharable=sharable, read_only=read_only, other_config=other_config) def VDI_clone(self, _1, vdi_to_clone_ref): db_ref = _db_content['VDI'][vdi_to_clone_ref] sr_ref = db_ref['SR'] return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref) def host_compute_free_memory(self, _1, ref): #Always return 12GB available return 12 * units.Gi def _plugin_agent_version(self, method, args): return as_json(returncode='0', message='1.0\\r\\n') def _plugin_agent_key_init(self, method, args): return as_json(returncode='D0', message='1') def _plugin_agent_password(self, method, args): return as_json(returncode='0', message='success') def _plugin_agent_inject_file(self, method, args): return as_json(returncode='0', message='success') def _plugin_agent_resetnetwork(self, method, args): return as_json(returncode='0', message='success') def _plugin_agent_agentupdate(self, method, args): url = args["url"] md5 = args["md5sum"] message = "success with %(url)s and hash:%(md5)s" % dict(url=url, md5=md5) return as_json(returncode='0', 
message=message) def _plugin_noop(self, method, args): return '' def _plugin_pickle_noop(self, method, args): return pickle.dumps(None) def _plugin_migration_transfer_vhd(self, method, args): kwargs = pickle.loads(args['params'])['kwargs'] vdi_ref = self.xenapi_request('VDI.get_by_uuid', (kwargs['vdi_uuid'], )) assert vdi_ref return pickle.dumps(None) _plugin_glance_upload_vhd = _plugin_pickle_noop _plugin_kernel_copy_vdi = _plugin_noop _plugin_kernel_create_kernel_ramdisk = _plugin_noop _plugin_kernel_remove_kernel_ramdisk = _plugin_noop _plugin_migration_move_vhds_into_sr = _plugin_noop def _plugin_xenhost_host_data(self, method, args): return jsonutils.dumps({'host_memory': {'total': 10, 'overhead': 20, 'free': 30, 'free-computed': 40}, 'host_hostname': 'fake-xenhost', 'host_cpu_info': {'cpu_count': 50}, }) def _plugin_poweraction(self, method, args): return jsonutils.dumps({"power_action": method[5:]}) _plugin_xenhost_host_reboot = _plugin_poweraction _plugin_xenhost_host_startup = _plugin_poweraction _plugin_xenhost_host_shutdown = _plugin_poweraction def _plugin_xenhost_set_host_enabled(self, method, args): enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled' return jsonutils.dumps({"status": enabled}) def _plugin_xenhost_host_uptime(self, method, args): return jsonutils.dumps({"uptime": "fake uptime"}) def _plugin_xenhost_get_pci_device_details(self, method, args): """Simulate the output of three PCI devices. Two of these devices are available for PCI passthrough, but only one will match the PCI whitelist used in the method test_pci_passthrough_devices_*(). Return a single list. """ # Driver is not pciback dev_bad1 = ["Slot:\t86:10.0", "Class:\t0604", "Vendor:\t10b5", "Device:\t8747", "Rev:\tba", "Driver:\tpcieport", "\n"] # Driver is pciback but vendor and device are bad dev_bad2 = ["Slot:\t88:00.0", "Class:\t0300", "Vendor:\t0bad", "Device:\tcafe", "SVendor:\t10de", "SDevice:\t100d", "Rev:\ta1", "Driver:\tpciback", "\n"] # Driver is pciback and vendor, device are used for matching dev_good = ["Slot:\t87:00.0", "Class:\t0300", "Vendor:\t10de", "Device:\t11bf", "SVendor:\t10de", "SDevice:\t100d", "Rev:\ta1", "Driver:\tpciback", "\n"] lspci_output = "\n".join(dev_bad1 + dev_bad2 + dev_good) return pickle.dumps(lspci_output) def _plugin_xenhost_get_pci_type(self, method, args): return pickle.dumps("type-PCI") def _plugin_console_get_console_log(self, method, args): dom_id = args["dom_id"] if dom_id == 0: raise Failure('Guest does not have a console') return base64.b64encode(zlib.compress("dom_id: %s" % dom_id)) def _plugin_nova_plugin_version_get_version(self, method, args): return pickle.dumps("1.2") def _plugin_xenhost_query_gc(self, method, args): return pickle.dumps("False") def host_call_plugin(self, _1, _2, plugin, method, args): func = getattr(self, '_plugin_%s_%s' % (plugin, method), None) if not func: raise Exception('No simulation in host_call_plugin for %s,%s' % (plugin, method)) return func(method, args) def VDI_get_virtual_size(self, *args): return 1 * units.Gi def VDI_resize_online(self, *args): return 'derp' VDI_resize = VDI_resize_online def _VM_reboot(self, session, vm_ref): db_ref = _db_content['VM'][vm_ref] if db_ref['power_state'] != 'Running': raise Failure(['VM_BAD_POWER_STATE', 'fake-opaque-ref', db_ref['power_state'].lower(), 'halted']) db_ref['power_state'] = 'Running' db_ref['domid'] = random.randrange(1, 1 << 16) def VM_clean_reboot(self, session, vm_ref): return self._VM_reboot(session, vm_ref) def VM_hard_reboot(self, session, vm_ref): 
return self._VM_reboot(session, vm_ref) def VM_hard_shutdown(self, session, vm_ref): db_ref = _db_content['VM'][vm_ref] db_ref['power_state'] = 'Halted' db_ref['domid'] = -1 VM_clean_shutdown = VM_hard_shutdown def VM_suspend(self, session, vm_ref): db_ref = _db_content['VM'][vm_ref] db_ref['power_state'] = 'Suspended' def VM_pause(self, session, vm_ref): db_ref = _db_content['VM'][vm_ref] db_ref['power_state'] = 'Paused' def pool_eject(self, session, host_ref): pass def pool_join(self, session, hostname, username, password): pass def pool_set_name_label(self, session, pool_ref, name): pass def host_migrate_receive(self, session, destref, nwref, options): return "fake_migrate_data" def VM_assert_can_migrate(self, session, vmref, migrate_data, live, vdi_map, vif_map, options): pass def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map, vif_map, options): pass def VM_remove_from_blocked_operations(self, session, vm_ref, key): # operation is idempotent, XenServer doesn't care if the key exists _db_content['VM'][vm_ref]['blocked_operations'].pop(key, None) def xenapi_request(self, methodname, params): if methodname.startswith('login'): self._login(methodname, params) return None elif methodname == 'logout' or methodname == 'session.logout': self._logout() return None else: full_params = (self._session,) + params meth = getattr(self, methodname, None) if meth is None: LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( _('xenapi.fake does not have an implementation for %s') % methodname) return meth(*full_params) def _login(self, method, params): self._session = str(uuid.uuid4()) _session_info = {'uuid': str(uuid.uuid4()), 'this_host': _db_content['host'].keys()[0]} _db_content['session'][self._session] = _session_info def _logout(self): s = self._session self._session = None if s not in _db_content['session']: raise exception.NovaException( "Logging out a session that is invalid or already logged " "out: %s" % s) del _db_content['session'][s] def __getattr__(self, name): if name == 'handle': return self._session elif name == 'xenapi': return _Dispatcher(self.xenapi_request, None) elif name.startswith('login') or name.startswith('slave_local'): return lambda *params: self._login(name, params) elif name.startswith('Async'): return lambda *params: self._async(name, params) elif '.' 
in name: impl = getattr(self, name.replace('.', '_')) if impl is not None: def callit(*params): LOG.debug(_('Calling %(name)s %(impl)s'), {'name': name, 'impl': impl}) self._check_session(params) return impl(*params) return callit if self._is_gettersetter(name, True): LOG.debug(_('Calling getter %s'), name) return lambda *params: self._getter(name, params) elif self._is_gettersetter(name, False): LOG.debug(_('Calling setter %s'), name) return lambda *params: self._setter(name, params) elif self._is_create(name): return lambda *params: self._create(name, params) elif self._is_destroy(name): return lambda *params: self._destroy(name, params) elif name == 'XenAPI': return FakeXenAPI() else: return None def _is_gettersetter(self, name, getter): bits = name.split('.') return (len(bits) == 2 and bits[0] in _CLASSES and bits[1].startswith(getter and 'get_' or 'set_')) def _is_create(self, name): return self._is_method(name, 'create') def _is_destroy(self, name): return self._is_method(name, 'destroy') def _is_method(self, name, meth): bits = name.split('.') return (len(bits) == 2 and bits[0] in _CLASSES and bits[1] == meth) def _getter(self, name, params): self._check_session(params) (cls, func) = name.split('.') if func == 'get_all': self._check_arg_count(params, 1) return get_all(cls) if func == 'get_all_records': self._check_arg_count(params, 1) return get_all_records(cls) if func == 'get_all_records_where': self._check_arg_count(params, 2) return get_all_records_where(cls, params[1]) if func == 'get_record': self._check_arg_count(params, 2) return get_record(cls, params[1]) if func in ('get_by_name_label', 'get_by_uuid'): self._check_arg_count(params, 2) return_singleton = (func == 'get_by_uuid') return self._get_by_field( _db_content[cls], func[len('get_by_'):], params[1], return_singleton=return_singleton) if len(params) == 2: field = func[len('get_'):] ref = params[1] if (ref in _db_content[cls]): if (field in _db_content[cls][ref]): return _db_content[cls][ref][field] else: raise Failure(['HANDLE_INVALID', cls, ref]) LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( _('xenapi.fake does not have an implementation for %s or it has ' 'been called with the wrong number of arguments') % name) def _setter(self, name, params): self._check_session(params) (cls, func) = name.split('.') if len(params) == 3: field = func[len('set_'):] ref = params[1] val = params[2] if (ref in _db_content[cls] and field in _db_content[cls][ref]): _db_content[cls][ref][field] = val return LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( 'xenapi.fake does not have an implementation for %s or it has ' 'been called with the wrong number of arguments or the database ' 'is missing that field' % name) def _create(self, name, params): self._check_session(params) is_sr_create = name == 'SR.create' is_vlan_create = name == 'VLAN.create' # Storage Repositories have a different API expected = is_sr_create and 10 or is_vlan_create and 4 or 2 self._check_arg_count(params, expected) (cls, _) = name.split('.') ref = (is_sr_create and _create_sr(cls, params) or is_vlan_create and _create_vlan(params[1], params[2], params[3]) or _create_object(cls, params[1])) # Call hook to provide any fixups needed (ex. 
creating backrefs) after_hook = 'after_%s_create' % cls if after_hook in globals(): globals()[after_hook](ref, params[1]) obj = get_record(cls, ref) # Add RO fields if cls == 'VM': obj['power_state'] = 'Halted' return ref def _destroy(self, name, params): self._check_session(params) self._check_arg_count(params, 2) table = name.split('.')[0] ref = params[1] if ref not in _db_content[table]: raise Failure(['HANDLE_INVALID', table, ref]) # Call destroy function (if exists) destroy_func = globals().get('destroy_%s' % table.lower()) if destroy_func: destroy_func(ref) else: del _db_content[table][ref] def _async(self, name, params): task_ref = create_task(name) task = _db_content['task'][task_ref] func = name[len('Async.'):] try: result = self.xenapi_request(func, params[1:]) if result: result = as_value(result) task['result'] = result task['status'] = 'success' except Failure as exc: task['error_info'] = exc.details task['status'] = 'failed' task['finished'] = timeutils.utcnow() return task_ref def _check_session(self, params): if (self._session is None or self._session not in _db_content['session']): raise Failure(['HANDLE_INVALID', 'session', self._session]) if len(params) == 0 or params[0] != self._session: LOG.debug(_('Raising NotImplemented')) raise NotImplementedError('Call to XenAPI without using .xenapi') def _check_arg_count(self, params, expected): actual = len(params) if actual != expected: raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH', expected, actual]) def _get_by_field(self, recs, k, v, return_singleton): result = [] for ref, rec in recs.iteritems(): if rec.get(k) == v: result.append(ref) if return_singleton: try: return result[0] except IndexError: raise Failure(['UUID_INVALID', v, result, recs, k]) return result class FakeXenAPI(object): def __init__(self): self.Failure = Failure # Based upon _Method from xmlrpclib. class _Dispatcher: def __init__(self, send, name): self.__send = send self.__name = name def __repr__(self): if self.__name: return '<xenapi.fake._Dispatcher for %s>' % self.__name else: return '<xenapi.fake._Dispatcher>' def __getattr__(self, name): if self.__name is None: return _Dispatcher(self.__send, name) else: return _Dispatcher(self.__send, "%s.%s" % (self.__name, name)) def __call__(self, *args): return self.__send(self.__name, args) nova-2014.1.5/nova/virt/xenapi/vmops.py0000664000567000056700000026350012540642544021014 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for VM-related functions (spawn, reboot, etc). 
""" import base64 import functools import time import zlib from eventlet import greenthread import netaddr from oslo.config import cfg from nova import block_device from nova import compute from nova.compute import flavors from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova.objects import aggregate as aggregate_obj from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova.openstack.common import timeutils from nova.openstack.common import units from nova.pci import pci_manager from nova import utils from nova.virt import configdrive from nova.virt import driver as virt_driver from nova.virt import firewall from nova.virt.xenapi import agent as xapi_agent from nova.virt.xenapi import pool_states from nova.virt.xenapi import vm_utils from nova.virt.xenapi import volume_utils from nova.virt.xenapi import volumeops LOG = logging.getLogger(__name__) xenapi_vmops_opts = [ cfg.IntOpt('running_timeout', default=60, deprecated_name='xenapi_running_timeout', deprecated_group='DEFAULT', help='Number of seconds to wait for instance ' 'to go to running state'), cfg.StrOpt('vif_driver', default='nova.virt.xenapi.vif.XenAPIBridgeDriver', deprecated_name='xenapi_vif_driver', deprecated_group='DEFAULT', help='The XenAPI VIF driver using XenServer Network APIs.'), cfg.StrOpt('image_upload_handler', default='nova.virt.xenapi.image.glance.GlanceStore', deprecated_name='xenapi_image_upload_handler', deprecated_group='DEFAULT', help='Dom0 plugin driver used to handle image uploads.'), ] CONF = cfg.CONF # xenapi_vmops options in the DEFAULT group were deprecated in Icehouse CONF.register_opts(xenapi_vmops_opts, 'xenserver') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc') DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( firewall.__name__, firewall.IptablesFirewallDriver.__name__) RESIZE_TOTAL_STEPS = 5 DEVICE_ROOT = '0' DEVICE_RESCUE = '1' DEVICE_SWAP = '2' DEVICE_CONFIGDRIVE = '3' # Note(johngarbutt) HVM guests only support four devices # until the PV tools activate, when others before available # As such, ephemeral disk only available once PV tools load # Note(johngarbutt) When very large ephemeral storage is required, # multiple disks may be added. In this case the device id below # is the used for the first disk. The second disk will be given # next device id, i.e. 5, and so on, until enough space is added. DEVICE_EPHEMERAL = '4' # Note(johngarbutt) Currently don't support ISO boot during rescue # and we must have the ISO visible before the PV drivers start DEVICE_CD = '1' def make_step_decorator(context, instance, update_instance_progress, total_offset=0): """Factory to create a decorator that records instance progress as a series of discrete steps. Each time the decorator is invoked we bump the total-step-count, so after:: @step def step1(): ... @step def step2(): ... we have a total-step-count of 2. Each time the step-function (not the step-decorator!) is invoked, we bump the current-step-count by 1, so after:: step1() the current-step-count would be 1 giving a progress of ``1 / 2 * 100`` or 50%. 
""" step_info = dict(total=total_offset, current=0) def bump_progress(): step_info['current'] += 1 update_instance_progress(context, instance, step_info['current'], step_info['total']) def step_decorator(f): step_info['total'] += 1 @functools.wraps(f) def inner(*args, **kwargs): rv = f(*args, **kwargs) bump_progress() return rv return inner return step_decorator class VMOps(object): """Management class for VM-related tasks.""" def __init__(self, session, virtapi): self.compute_api = compute.API() self._session = session self._virtapi = virtapi self._volumeops = volumeops.VolumeOps(self._session) self.firewall_driver = firewall.load_driver( DEFAULT_FIREWALL_DRIVER, self._virtapi, xenapi_session=self._session) vif_impl = importutils.import_class(CONF.xenserver.vif_driver) self.vif_driver = vif_impl(xenapi_session=self._session) self.default_root_dev = '/dev/sda' LOG.debug(_("Importing image upload handler: %s"), CONF.xenserver.image_upload_handler) self.image_upload_handler = importutils.import_object( CONF.xenserver.image_upload_handler) def agent_enabled(self, instance): if CONF.xenserver.disable_agent: return False return xapi_agent.should_use_agent(instance) def _get_agent(self, instance, vm_ref): if self.agent_enabled(instance): return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi, instance, vm_ref) raise exception.NovaException(_("Error: Agent is disabled")) def instance_exists(self, name_label): return vm_utils.lookup(self._session, name_label) is not None def list_instances(self): """List VM instances.""" # TODO(justinsb): Should we just always use the details method? # Seems to be the same number of API calls.. name_labels = [] for vm_ref, vm_rec in vm_utils.list_vms(self._session): name_labels.append(vm_rec["name_label"]) return name_labels def list_instance_uuids(self): """Get the list of nova instance uuids for VMs found on the hypervisor. """ nova_uuids = [] for vm_ref, vm_rec in vm_utils.list_vms(self._session): other_config = vm_rec['other_config'] nova_uuid = other_config.get('nova_uuid') if nova_uuid: nova_uuids.append(nova_uuid) return nova_uuids def confirm_migration(self, migration, instance, network_info): self._destroy_orig_vm(instance, network_info) def _destroy_orig_vm(self, instance, network_info): name_label = self._get_orig_vm_name_label(instance) vm_ref = vm_utils.lookup(self._session, name_label) return self._destroy(instance, vm_ref, network_info=network_info) def _attach_mapped_block_devices(self, instance, block_device_info): # We are attaching these volumes before start (no hotplugging) # because some guests (windows) don't load PV drivers quickly block_device_mapping = virt_driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] mount_device = vol['mount_device'].rpartition("/")[2] self._volumeops.attach_volume(connection_info, instance['name'], mount_device, hotplug=False) def finish_revert_migration(self, context, instance, block_device_info=None, power_on=True): self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info, power_on) def _restore_orig_vm_and_cleanup_orphan(self, instance, block_device_info=None, power_on=True): # NOTE(sirp): the original vm was suffixed with '-orig'; find it using # the old suffix, remove the suffix, then power it back on. 
name_label = self._get_orig_vm_name_label(instance) vm_ref = vm_utils.lookup(self._session, name_label) # NOTE(danms): if we're reverting migration in the failure case, # make sure we don't have a conflicting vm still running here, # as might be the case in a failed migrate-to-same-host situation new_ref = vm_utils.lookup(self._session, instance['name']) if vm_ref is not None: if new_ref is not None: self._destroy(instance, new_ref) # Remove the '-orig' suffix (which was added in case the # resized VM ends up on the source host, common during # testing) name_label = instance['name'] vm_utils.set_vm_name_label(self._session, vm_ref, name_label) self._attach_mapped_block_devices(instance, block_device_info) elif new_ref is not None: # We crashed before the -orig backup was made vm_ref = new_ref if power_on and vm_utils.is_vm_shutdown(self._session, vm_ref): self._start(instance, vm_ref) def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): def null_step_decorator(f): return f def create_disks_step(undo_mgr, disk_image_type, image_meta, name_label): #TODO(johngarbutt) clean up if this is not run vdis = vm_utils.import_all_migrated_disks(self._session, instance) def undo_create_disks(): eph_vdis = vdis['ephemerals'] root_vdi = vdis['root'] vdi_refs = [vdi['ref'] for vdi in eph_vdis.values()] vdi_refs.append(root_vdi['ref']) vm_utils.safe_destroy_vdis(self._session, vdi_refs) undo_mgr.undo_with(undo_create_disks) return vdis def completed_callback(): self._update_instance_progress(context, instance, step=5, total_steps=RESIZE_TOTAL_STEPS) self._spawn(context, instance, image_meta, null_step_decorator, create_disks_step, first_boot=False, injected_files=None, admin_password=None, network_info=network_info, block_device_info=block_device_info, name_label=None, rescue=False, power_on=power_on, resize=resize_instance, completed_callback=completed_callback) def _start(self, instance, vm_ref=None, bad_volumes_callback=None): """Power on a VM instance.""" vm_ref = vm_ref or self._get_vm_opaque_ref(instance) LOG.debug(_("Starting instance"), instance=instance) # Attached volumes that have become non-responsive will prevent a VM # from starting, so scan for these before attempting to start # # In order to make sure this detach is consistent (virt, BDM, cinder), # we only detach in the virt-layer if a callback is provided. 
if bad_volumes_callback: bad_devices = self._volumeops.find_bad_volumes(vm_ref) for device_name in bad_devices: self._volumeops.detach_volume( None, instance['name'], device_name) self._session.call_xenapi('VM.start_on', vm_ref, self._session.host_ref, False, False) # Allow higher-layers a chance to detach bad-volumes as well (in order # to cleanup BDM entries and detach in Cinder) if bad_volumes_callback and bad_devices: bad_volumes_callback(bad_devices) def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None, name_label=None, rescue=False): if block_device_info: LOG.debug(_("Block device information present: %s"), block_device_info, instance=instance) if block_device_info and not block_device_info['root_device_name']: block_device_info['root_device_name'] = self.default_root_dev step = make_step_decorator(context, instance, self._update_instance_progress) @step def create_disks_step(undo_mgr, disk_image_type, image_meta, name_label): vdis = vm_utils.get_vdis_for_instance(context, self._session, instance, name_label, image_meta.get('id'), disk_image_type, block_device_info=block_device_info) def undo_create_disks(): vdi_refs = [vdi['ref'] for vdi in vdis.values() if not vdi.get('osvol')] vm_utils.safe_destroy_vdis(self._session, vdi_refs) undo_mgr.undo_with(undo_create_disks) return vdis self._spawn(context, instance, image_meta, step, create_disks_step, True, injected_files, admin_password, network_info, block_device_info, name_label, rescue) def _spawn(self, context, instance, image_meta, step, create_disks_step, first_boot, injected_files=None, admin_password=None, network_info=None, block_device_info=None, name_label=None, rescue=False, power_on=True, resize=True, completed_callback=None): if name_label is None: name_label = instance['name'] self._ensure_instance_name_unique(name_label) self._ensure_enough_free_mem(instance) def attach_disks(undo_mgr, vm_ref, vdis, disk_image_type): try: ipxe_boot = strutils.bool_from_string( image_meta['properties']['ipxe_boot']) except KeyError: ipxe_boot = False if ipxe_boot: if 'iso' in vdis: vm_utils.handle_ipxe_iso( self._session, instance, vdis['iso'], network_info) else: LOG.warning(_('ipxe_boot is True but no ISO image found'), instance=instance) if resize: self._resize_up_vdis(instance, vdis) self._attach_disks(instance, vm_ref, name_label, vdis, disk_image_type, network_info, admin_password, injected_files) if not first_boot: self._attach_mapped_block_devices(instance, block_device_info) def attach_pci_devices(undo_mgr, vm_ref): dev_to_passthrough = "" devices = pci_manager.get_instance_pci_devs(instance) for d in devices: pci_address = d["address"] if pci_address.count(":") == 1: pci_address = "0000:" + pci_address dev_to_passthrough += ",0/" + pci_address # Remove the first comma if string is not empty. # Note(guillaume-thouvenin): If dev_to_passthrough is empty, we # don't need to update other_config. 
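# For illustration (hypothetical addresses): devices at '06:00.1' and
# '0000:07:00.0' build ',0/0000:06:00.1,0/0000:07:00.0' here; the
# leading comma is stripped below before the string is written to
# other_config.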
if dev_to_passthrough: vm_utils.set_other_config_pci(self._session, vm_ref, dev_to_passthrough[1:]) @step def determine_disk_image_type_step(undo_mgr): return vm_utils.determine_disk_image_type(image_meta) @step def create_kernel_ramdisk_step(undo_mgr): kernel_file, ramdisk_file = vm_utils.create_kernel_and_ramdisk( context, self._session, instance, name_label) def undo_create_kernel_ramdisk(): vm_utils.destroy_kernel_ramdisk(self._session, instance, kernel_file, ramdisk_file) undo_mgr.undo_with(undo_create_kernel_ramdisk) return kernel_file, ramdisk_file @step def create_vm_record_step(undo_mgr, disk_image_type, kernel_file, ramdisk_file): vm_ref = self._create_vm_record(context, instance, name_label, disk_image_type, kernel_file, ramdisk_file, image_meta) def undo_create_vm(): self._destroy(instance, vm_ref, network_info=network_info) undo_mgr.undo_with(undo_create_vm) return vm_ref @step def attach_devices_step(undo_mgr, vm_ref, vdis, disk_image_type): attach_disks(undo_mgr, vm_ref, vdis, disk_image_type) attach_pci_devices(undo_mgr, vm_ref) if rescue: # NOTE(johannes): Attach root disk to rescue VM now, before # booting the VM, since we can't hotplug block devices # on non-PV guests @step def attach_root_disk_step(undo_mgr, vm_ref): vbd_ref = self._attach_orig_disk_for_rescue(instance, vm_ref) def undo_attach_root_disk(): # destroy the vbd in preparation to re-attach the VDI # to its original VM. (does not delete VDI) vm_utils.destroy_vbd(self._session, vbd_ref) undo_mgr.undo_with(undo_attach_root_disk) @step def inject_instance_data_step(undo_mgr, vm_ref, vdis): self._inject_instance_metadata(instance, vm_ref) self._inject_auto_disk_config(instance, vm_ref) # NOTE: We add the hostname here so windows PV tools # can pick it up during booting if first_boot: self._inject_hostname(instance, vm_ref, rescue) self._file_inject_vm_settings(instance, vm_ref, vdis, network_info) self.inject_network_info(instance, network_info, vm_ref) @step def setup_network_step(undo_mgr, vm_ref): self._create_vifs(instance, vm_ref, network_info) self._prepare_instance_filter(instance, network_info) @step def boot_instance_step(undo_mgr, vm_ref): if power_on: self._start(instance, vm_ref) self._wait_for_instance_to_start(instance, vm_ref) @step def configure_booted_instance_step(undo_mgr, vm_ref): if first_boot: self._configure_new_instance_with_agent(instance, vm_ref, injected_files, admin_password) self._remove_hostname(instance, vm_ref) @step def apply_security_group_filters_step(undo_mgr): self.firewall_driver.apply_instance_filter(instance, network_info) undo_mgr = utils.UndoManager() try: # NOTE(sirp): The create_disks() step will potentially take a # *very* long time to complete since it has to fetch the image # over the network and images can be several gigs in size. To # avoid progress remaining at 0% for too long, make sure the # first step is something that completes rather quickly. 
disk_image_type = determine_disk_image_type_step(undo_mgr) vdis = create_disks_step(undo_mgr, disk_image_type, image_meta, name_label) kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr) vm_ref = create_vm_record_step(undo_mgr, disk_image_type, kernel_file, ramdisk_file) attach_devices_step(undo_mgr, vm_ref, vdis, disk_image_type) inject_instance_data_step(undo_mgr, vm_ref, vdis) setup_network_step(undo_mgr, vm_ref) if rescue: attach_root_disk_step(undo_mgr, vm_ref) boot_instance_step(undo_mgr, vm_ref) configure_booted_instance_step(undo_mgr, vm_ref) apply_security_group_filters_step(undo_mgr) if completed_callback: completed_callback() except Exception: msg = _("Failed to spawn, rolling back") undo_mgr.rollback_and_reraise(msg=msg, instance=instance) def _attach_orig_disk_for_rescue(self, instance, vm_ref): orig_vm_ref = vm_utils.lookup(self._session, instance['name']) vdi_ref = self._find_root_vdi_ref(orig_vm_ref) return vm_utils.create_vbd(self._session, vm_ref, vdi_ref, DEVICE_RESCUE, bootable=False) def _file_inject_vm_settings(self, instance, vm_ref, vdis, network_info): if CONF.flat_injected: vm_utils.preconfigure_instance(self._session, instance, vdis['root']['ref'], network_info) def _ensure_instance_name_unique(self, name_label): vm_ref = vm_utils.lookup(self._session, name_label) if vm_ref is not None: raise exception.InstanceExists(name=name_label) def _ensure_enough_free_mem(self, instance): if not vm_utils.is_enough_free_mem(self._session, instance): raise exception.InsufficientFreeMemory(uuid=instance['uuid']) def _create_vm_record(self, context, instance, name_label, disk_image_type, kernel_file, ramdisk_file, image_meta): """Create the VM record in Xen, making sure that we do not create a duplicate name-label. Also do a rough sanity check on memory to try to short-circuit a potential failure later. (The memory check only accounts for running VMs, so it can miss other builds that are in progress.) 
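For illustration (hedged): when determine_disk_image_type() picks a VHD image and the instance's vm_mode normalizes to 'xen', the record is created with use_pv_kernel=True, while an 'hvm' instance gets use_pv_kernel=False.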
""" mode = vm_utils.determine_vm_mode(instance, disk_image_type) if instance['vm_mode'] != mode: # Update database with normalized (or determined) value self._virtapi.instance_update(context, instance['uuid'], {'vm_mode': mode}) image_properties = image_meta.get("properties") device_id = vm_utils.get_vm_device_id(self._session, image_properties) use_pv_kernel = (mode == vm_mode.XEN) LOG.debug(_("Using PV kernel: %s"), use_pv_kernel, instance=instance) vm_ref = vm_utils.create_vm(self._session, instance, name_label, kernel_file, ramdisk_file, use_pv_kernel, device_id) return vm_ref def _attach_disks(self, instance, vm_ref, name_label, vdis, disk_image_type, network_info, admin_password=None, files=None): flavor = flavors.extract_flavor(instance) # Attach (required) root disk if disk_image_type == vm_utils.ImageType.DISK_ISO: # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk root_disk_size = flavor['root_gb'] if root_disk_size > 0: vm_utils.generate_iso_blank_root_disk(self._session, instance, vm_ref, DEVICE_ROOT, name_label, root_disk_size) cd_vdi = vdis.pop('iso') vm_utils.attach_cd(self._session, vm_ref, cd_vdi['ref'], DEVICE_CD) else: root_vdi = vdis['root'] if instance['auto_disk_config']: LOG.debug(_("Auto configuring disk, attempting to " "resize root disk..."), instance=instance) vm_utils.try_auto_configure_disk(self._session, root_vdi['ref'], flavor['root_gb']) vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'], DEVICE_ROOT, bootable=True, osvol=root_vdi.get('osvol')) # Attach (optional) additional block-devices for type_, vdi_info in vdis.items(): # Additional block-devices for boot use their device-name as the # type. if not type_.startswith('/dev'): continue # Convert device name to userdevice number, e.g. /dev/xvdb -> 1 userdevice = ord(block_device.strip_prefix(type_)) - ord('a') vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'], userdevice, bootable=False, osvol=vdi_info.get('osvol')) # Attach (optional) swap disk swap_mb = flavor['swap'] if swap_mb: vm_utils.generate_swap(self._session, instance, vm_ref, DEVICE_SWAP, name_label, swap_mb) ephemeral_gb = flavor['ephemeral_gb'] if ephemeral_gb: ephemeral_vdis = vdis.get('ephemerals') if ephemeral_vdis: # attach existing (migrated) ephemeral disks for userdevice, ephemeral_vdi in ephemeral_vdis.iteritems(): vm_utils.create_vbd(self._session, vm_ref, ephemeral_vdi['ref'], userdevice, bootable=False) else: # create specified ephemeral disks vm_utils.generate_ephemeral(self._session, instance, vm_ref, DEVICE_EPHEMERAL, name_label, ephemeral_gb) # Attach (optional) configdrive v2 disk if configdrive.required_by(instance): vm_utils.generate_configdrive(self._session, instance, vm_ref, DEVICE_CONFIGDRIVE, network_info, admin_password=admin_password, files=files) def _wait_for_instance_to_start(self, instance, vm_ref): LOG.debug(_('Waiting for instance state to become running'), instance=instance) expiration = time.time() + CONF.xenserver.running_timeout while time.time() < expiration: state = vm_utils.get_power_state(self._session, vm_ref) if state == power_state.RUNNING: break greenthread.sleep(0.5) def _configure_new_instance_with_agent(self, instance, vm_ref, injected_files, admin_password): if not self.agent_enabled(instance): LOG.debug(_("Skip agent setup, not enabled."), instance=instance) return agent = self._get_agent(instance, vm_ref) version = agent.get_version() if not version: LOG.debug(_("Skip agent setup, unable to contact agent."), instance=instance) return LOG.debug(_('Detected agent version: 
%s'), version, instance=instance) # NOTE(johngarbutt) the agent object allows all of # the following steps to silently fail agent.inject_ssh_key() if injected_files: agent.inject_files(injected_files) if admin_password: agent.set_admin_password(admin_password) agent.resetnetwork() agent.update_if_needed(version) def _prepare_instance_filter(self, instance, network_info): try: self.firewall_driver.setup_basic_filtering( instance, network_info) except NotImplementedError: # NOTE(salvatore-orlando): setup_basic_filtering might be # empty or not implemented at all, as basic filter could # be implemented with VIF rules created by xapi plugin pass self.firewall_driver.prepare_instance_filter(instance, network_info) def _get_vm_opaque_ref(self, instance, check_rescue=False): """Get xapi OpaqueRef from a db record. :param check_rescue: if True will return the 'name'-rescue vm if it exists, instead of just 'name' """ vm_ref = vm_utils.lookup(self._session, instance['name'], check_rescue) if vm_ref is None: raise exception.InstanceNotFound(instance_id=instance['name']) return vm_ref def _acquire_bootlock(self, vm): """Prevent an instance from booting.""" self._session.call_xenapi( "VM.set_blocked_operations", vm, {"start": ""}) def _release_bootlock(self, vm): """Allow an instance to boot.""" self._session.call_xenapi( "VM.remove_from_blocked_operations", vm, "start") def snapshot(self, context, instance, image_id, update_task_state): """Create snapshot from a running VM instance. :param context: request context :param instance: instance to be snapshotted :param image_id: id of image to upload to Steps involved in a XenServer snapshot: 1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, Snapshot VHD 2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to a 'base-copy' VDI. The base_copy is immutable and may be chained with other base_copies. If chained, the base_copies coalesce together, so, we must wait for this coalescing to occur to get a stable representation of the data on disk. 3. Push-to-data-store: Once coalesced, we call 'image_upload_handler' to upload the images. """ vm_ref = self._get_vm_opaque_ref(instance) label = "%s-snapshot" % instance['name'] with vm_utils.snapshot_attached_here( self._session, instance, vm_ref, label, post_snapshot_callback=update_task_state) as vdi_uuids: update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD) self.image_upload_handler.upload_image(context, self._session, instance, vdi_uuids, image_id) LOG.debug(_("Finished snapshot and upload for VM"), instance=instance) def _get_orig_vm_name_label(self, instance): return instance['name'] + '-orig' def _update_instance_progress(self, context, instance, step, total_steps): """Update instance progress percent to reflect current step number """ # FIXME(sirp): for now we're taking a KISS approach to instance # progress: # Divide the action's workflow into discrete steps and "bump" the # instance's progress field as each step is completed. # # For a first cut this should be fine, however, for large VM images, # the get_vdis_for_instance step begins to dominate the equation. A # better approximation would use the percentage of the VM image that # has been streamed to the destination host. 
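# Worked example: with RESIZE_TOTAL_STEPS = 5, finishing step 3 stores
# round(float(3) / 5 * 100) == 60, i.e. the instance reports 60% progress.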
progress = round(float(step) / total_steps * 100) LOG.debug(_("Updating progress to %d"), progress, instance=instance) self._virtapi.instance_update(context, instance['uuid'], {'progress': progress}) def _resize_ensure_vm_is_shutdown(self, instance, vm_ref): if vm_utils.is_vm_shutdown(self._session, vm_ref): LOG.debug(_("VM was already shutdown."), instance=instance) return if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref): LOG.debug(_("Clean shutdown did not complete successfully, " "trying hard shutdown."), instance=instance) if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref): raise exception.ResizeError( reason=_("Unable to terminate instance.")) def _migrate_disk_resizing_down(self, context, instance, dest, flavor, vm_ref, sr_path): step = make_step_decorator(context, instance, self._update_instance_progress, total_offset=1) @step def fake_step_to_match_resizing_up(): pass @step def rename_and_power_off_vm(undo_mgr): self._resize_ensure_vm_is_shutdown(instance, vm_ref) self._apply_orig_vm_name_label(instance, vm_ref) def restore_orig_vm(): # Do not need to restore block devices, not yet been removed self._restore_orig_vm_and_cleanup_orphan(instance) undo_mgr.undo_with(restore_orig_vm) @step def create_copy_vdi_and_resize(undo_mgr, old_vdi_ref): new_vdi_ref, new_vdi_uuid = vm_utils.resize_disk(self._session, instance, old_vdi_ref, flavor) def cleanup_vdi_copy(): vm_utils.destroy_vdi(self._session, new_vdi_ref) undo_mgr.undo_with(cleanup_vdi_copy) return new_vdi_ref, new_vdi_uuid @step def transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid): vm_utils.migrate_vhd(self._session, instance, new_vdi_uuid, dest, sr_path, 0) # Clean up VDI now that it's been copied vm_utils.destroy_vdi(self._session, new_vdi_ref) undo_mgr = utils.UndoManager() try: fake_step_to_match_resizing_up() rename_and_power_off_vm(undo_mgr) old_vdi_ref, _ignore = vm_utils.get_vdi_for_vm_safely( self._session, vm_ref) new_vdi_ref, new_vdi_uuid = create_copy_vdi_and_resize( undo_mgr, old_vdi_ref) transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid) except Exception as error: LOG.exception(_("_migrate_disk_resizing_down failed. " "Restoring orig vm due_to: %s."), error, instance=instance) undo_mgr._rollback() raise exception.InstanceFaultRollback(error) def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref, sr_path): step = make_step_decorator(context, instance, self._update_instance_progress, total_offset=1) """ NOTE(johngarbutt) Understanding how resize up works. For resize up, we attempt to minimize the amount of downtime for users by copying snapshots of their disks, while their VM is still running. It is worth noting, that migrating the snapshot, means migrating the whole VHD chain up to, but not including, the leaf VHD the VM is still writing to. Once the snapshots have been migrated, we power down the VM and migrate all the disk changes since the snapshots were taken. In addition, the snapshots are taken at the latest possible point, to help minimize the time it takes to migrate the disk changes after the VM has been turned off. Before starting to migrate any of the disks, we rename the VM, to -orig, in case we attempt to migrate the VM back onto this host, and so once we have completed the migration of the disk, confirm/rollback migrate can work in the usual way. If there is a failure at any point, we need to rollback to the position we were in before starting to migrate. 
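For illustration (hypothetical two-VHD root chain): after the snapshot the chain is [leaf, base]; 'base' is migrated while the VM is still running, the VM is then powered down, and only 'leaf' (the writes made since the snapshot) remains to be transferred. 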
In particular, we need to delete any snapshot VDIs that may have been created, and restore the VM back to its original name. """ @step def fake_step_to_show_snapshot_complete(): pass @step def transfer_immutable_vhds(root_vdi_uuids): active_root_vdi_uuid = root_vdi_uuids[0] immutable_root_vdi_uuids = root_vdi_uuids[1:] for vhd_num, vdi_uuid in enumerate(immutable_root_vdi_uuids, start=1): vm_utils.migrate_vhd(self._session, instance, vdi_uuid, dest, sr_path, vhd_num) LOG.debug(_("Migrated root base vhds"), instance=instance) return active_root_vdi_uuid def _process_ephemeral_chain_recursive(ephemeral_chains, active_vdi_uuids): # This method is called several times, recursively. # The first phase snapshots the ephemeral disks, and # migrates the read only VHD files. # The final call into this method calls # power_down_and_transfer_leaf_vhds # to turn off the VM and copy the rest of the VHDs. number_of_chains = len(ephemeral_chains) if number_of_chains == 0: # If we get here, we have snapshotted and migrated # all the ephemeral disks, so it's time to power down # and complete the migration of the diffs since the snapshot LOG.debug(_("Migrated all base vhds."), instance=instance) return power_down_and_transfer_leaf_vhds( active_root_vdi_uuid, active_vdi_uuids) remaining_chains = [] if number_of_chains > 1: remaining_chains = ephemeral_chains[1:] ephemeral_disk_index = len(active_vdi_uuids) userdevice = int(DEVICE_EPHEMERAL) + ephemeral_disk_index # Here we take a snapshot of the ephemeral disk, # and migrate all VHDs in the chain that are not being written to. # Once that is completed, we call back into this method to either: # - migrate any remaining ephemeral disks # - or, if all disks are migrated, we power down and complete # the migration by copying the diffs since all the snapshots # were taken with vm_utils.snapshot_attached_here(self._session, instance, vm_ref, label, str(userdevice)) as chain_vdi_uuids: # remember active vdi, we will migrate these later active_vdi_uuids.append(chain_vdi_uuids[0]) # migrate inactive vhds inactive_vdi_uuids = chain_vdi_uuids[1:] ephemeral_disk_number = ephemeral_disk_index + 1 for seq_num, vdi_uuid in enumerate(inactive_vdi_uuids, start=1): vm_utils.migrate_vhd(self._session, instance, vdi_uuid, dest, sr_path, seq_num, ephemeral_disk_number) LOG.debug(_("Read-only migrated for disk: %s"), userdevice, instance=instance) # This is recursive to simplify the taking and cleaning up # of all the ephemeral disk snapshots return _process_ephemeral_chain_recursive(remaining_chains, active_vdi_uuids) @step def transfer_ephemeral_disks_then_all_leaf_vdis(): ephemeral_chains = vm_utils.get_all_vdi_uuids_for_vm( self._session, vm_ref, min_userdevice=int(DEVICE_EPHEMERAL)) if ephemeral_chains: ephemeral_chains = list(ephemeral_chains) else: ephemeral_chains = [] _process_ephemeral_chain_recursive(ephemeral_chains, []) @step def power_down_and_transfer_leaf_vhds(root_vdi_uuid, ephemeral_vdi_uuids=None): self._resize_ensure_vm_is_shutdown(instance, vm_ref) vm_utils.migrate_vhd(self._session, instance, root_vdi_uuid, dest, sr_path, 0) if ephemeral_vdi_uuids: for ephemeral_disk_number, ephemeral_vdi_uuid in enumerate( ephemeral_vdi_uuids, start=1): vm_utils.migrate_vhd(self._session, instance, ephemeral_vdi_uuid, dest, sr_path, 0, ephemeral_disk_number) self._apply_orig_vm_name_label(instance, vm_ref) try: label = "%s-snapshot" % instance['name'] with vm_utils.snapshot_attached_here( self._session, instance, vm_ref, label) as root_vdi_uuids: # NOTE(johngarbutt) snapshot 
attached here will delete # the snapshot if an error occurs fake_step_to_show_snapshot_complete() # transfer all the non-active VHDs in the root disk chain active_root_vdi_uuid = transfer_immutable_vhds(root_vdi_uuids) # snapshot and transfer all ephemeral disks # then power down and transfer any diffs since # the snapshots were taken transfer_ephemeral_disks_then_all_leaf_vdis() except Exception as error: LOG.exception(_("_migrate_disk_resizing_up failed. " "Restoring orig vm due_to: %s."), error, instance=instance) try: self._restore_orig_vm_and_cleanup_orphan(instance) #TODO(johngarbutt) should also cleanup VHDs at destination except Exception as rollback_error: LOG.warn(_("_migrate_disk_resizing_up failed to " "rollback: %s"), rollback_error, instance=instance) raise exception.InstanceFaultRollback(error) def _apply_orig_vm_name_label(self, instance, vm_ref): # NOTE(sirp): in case we're resizing to the same host (for dev # purposes), apply a suffix to name-label so the two VM records # extant until a confirm_resize don't collide. name_label = self._get_orig_vm_name_label(instance) vm_utils.set_vm_name_label(self._session, vm_ref, name_label) def _ensure_not_resize_down_ephemeral(self, instance, flavor): old_gb = instance["ephemeral_gb"] new_gb = flavor["ephemeral_gb"] if old_gb > new_gb: reason = _("Can't resize down ephemeral disks.") raise exception.ResizeError(reason) def migrate_disk_and_power_off(self, context, instance, dest, flavor, block_device_info): """Copies a VHD from one host machine to another, possibly resizing filesystem before hand. :param instance: the instance that owns the VHD in question. :param dest: the destination host machine. :param flavor: flavor to resize to """ self._ensure_not_resize_down_ephemeral(instance, flavor) # 0. Zero out the progress to begin self._update_instance_progress(context, instance, step=0, total_steps=RESIZE_TOTAL_STEPS) old_gb = instance['root_gb'] new_gb = flavor['root_gb'] resize_down = old_gb > new_gb if new_gb == 0 and old_gb != 0: reason = _("Can't resize a disk to 0 GB.") raise exception.ResizeError(reason=reason) vm_ref = self._get_vm_opaque_ref(instance) sr_path = vm_utils.get_sr_path(self._session) if resize_down: self._migrate_disk_resizing_down( context, instance, dest, flavor, vm_ref, sr_path) else: self._migrate_disk_resizing_up( context, instance, dest, vm_ref, sr_path) self._detach_block_devices_from_orig_vm(instance, block_device_info) # NOTE(sirp): disk_info isn't used by the xenapi driver, instead it # uses a staging-area (/images/instance) and sequence-numbered # VHDs to figure out how to reconstruct the VDI chain after syncing disk_info = {} return disk_info def _detach_block_devices_from_orig_vm(self, instance, block_device_info): block_device_mapping = virt_driver.block_device_info_get_mapping( block_device_info) name_label = self._get_orig_vm_name_label(instance) for vol in block_device_mapping: connection_info = vol['connection_info'] mount_device = vol['mount_device'].rpartition("/")[2] self._volumeops.detach_volume(connection_info, name_label, mount_device) def _resize_up_vdis(self, instance, vdis): new_root_gb = instance['root_gb'] root_vdi = vdis.get('root') if new_root_gb and root_vdi: vdi_ref = root_vdi['ref'] vm_utils.update_vdi_virtual_size(self._session, instance, vdi_ref, new_root_gb) ephemeral_vdis = vdis.get('ephemerals') if not ephemeral_vdis: # NOTE(johngarbutt) no existing (migrated) ephemeral disks # to resize, so nothing more to do here. 
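# Illustration of the other branch (hypothetical sizes): were
# ephemeral_vdis present and total_ephemeral_gb split by
# vm_utils.get_ephemeral_disk_sizes() into [2000, 48], the loop below
# would resize userdevice 4 in place and generate userdevice 5 fresh
# if it did not come across in the migration.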
return total_ephemeral_gb = instance['ephemeral_gb'] if total_ephemeral_gb: sizes = vm_utils.get_ephemeral_disk_sizes(total_ephemeral_gb) # resize existing (migrated) ephemeral disks, # and add any extra disks if required due to a # larger total_ephemeral_gb (resize down is not supported). for userdevice, new_size in enumerate(sizes, start=int(DEVICE_EPHEMERAL)): vdi = ephemeral_vdis.get(str(userdevice)) if vdi: vdi_ref = vdi['ref'] vm_utils.update_vdi_virtual_size(self._session, instance, vdi_ref, new_size) else: LOG.debug("Generating new ephemeral vdi %d during resize", userdevice, instance=instance) # NOTE(johngarbutt) we generate but don't attach # the new disk to make up any additional ephemeral space vdi_ref = vm_utils.generate_single_ephemeral( self._session, instance, None, userdevice, new_size) vdis[str(userdevice)] = {'ref': vdi_ref, 'generated': True} def reboot(self, instance, reboot_type, bad_volumes_callback=None): """Reboot VM instance.""" # Note (salvatore-orlando): security group rules are not re-enforced # upon reboot, since this action on the XenAPI drivers does not # remove existing filters vm_ref = self._get_vm_opaque_ref(instance, check_rescue=True) try: if reboot_type == "HARD": self._session.call_xenapi('VM.hard_reboot', vm_ref) else: self._session.call_xenapi('VM.clean_reboot', vm_ref) except self._session.XenAPI.Failure as exc: details = exc.details if (details[0] == 'VM_BAD_POWER_STATE' and details[-1] == 'halted'): LOG.info(_("Starting halted instance found during reboot"), instance=instance) self._start(instance, vm_ref=vm_ref, bad_volumes_callback=bad_volumes_callback) return elif details[0] == 'SR_BACKEND_FAILURE_46': LOG.warn(_("Reboot failed due to bad volumes, detaching bad" " volumes and starting halted instance"), instance=instance) self._start(instance, vm_ref=vm_ref, bad_volumes_callback=bad_volumes_callback) return else: raise def set_admin_password(self, instance, new_pass): """Set the root/admin password on the VM instance.""" if self.agent_enabled(instance): vm_ref = self._get_vm_opaque_ref(instance) agent = self._get_agent(instance, vm_ref) agent.set_admin_password(new_pass) else: raise NotImplementedError() def inject_file(self, instance, path, contents): """Write a file to the VM instance.""" if self.agent_enabled(instance): vm_ref = self._get_vm_opaque_ref(instance) agent = self._get_agent(instance, vm_ref) agent.inject_file(path, contents) else: raise NotImplementedError() @staticmethod def _sanitize_xenstore_key(key): """Xenstore only allows the following characters as keys: ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz 0123456789-/_@ So convert the others to _ Also convert / to _, because that is somewhat like a path separator. 
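For example, 'org.openstack/a key' is sanitised to 'org_openstack_a_key', since '.', '/' and ' ' all fall outside the allowed set.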
""" allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ" "abcdefghijklmnopqrstuvwxyz" "0123456789-_@") return ''.join([x in allowed_chars and x or '_' for x in key]) def _inject_instance_metadata(self, instance, vm_ref): """Inject instance metadata into xenstore.""" @utils.synchronized('xenstore-' + instance['uuid']) def store_meta(topdir, data_dict): for key, value in data_dict.items(): key = self._sanitize_xenstore_key(key) value = value or '' self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key), jsonutils.dumps(value)) # Store user metadata store_meta('vm-data/user-metadata', utils.instance_meta(instance)) def _inject_auto_disk_config(self, instance, vm_ref): """Inject instance's auto_disk_config attribute into xenstore.""" @utils.synchronized('xenstore-' + instance['uuid']) def store_auto_disk_config(key, value): value = value and True or False self._add_to_param_xenstore(vm_ref, key, str(value)) store_auto_disk_config('vm-data/auto-disk-config', instance['auto_disk_config']) def change_instance_metadata(self, instance, diff): """Apply changes to instance metadata to xenstore.""" try: vm_ref = self._get_vm_opaque_ref(instance) except exception.NotFound: # NOTE(johngarbutt) race conditions mean we can still get here # during operations where the VM is not present, like resize. # Skip the update when not possible, as the updated metadata will # get added when the VM is being booted up at the end of the # resize or rebuild. LOG.warn(_("Unable to update metadata, VM not found."), instance=instance, exc_info=True) return def process_change(location, change): if change[0] == '-': self._remove_from_param_xenstore(vm_ref, location) try: self._delete_from_xenstore(instance, location, vm_ref=vm_ref) except exception.InstanceNotFound: # If the VM is not running then no need to update # the live xenstore - the param xenstore will be # used next time the VM is booted pass elif change[0] == '+': self._add_to_param_xenstore(vm_ref, location, jsonutils.dumps(change[1])) try: self._write_to_xenstore(instance, location, change[1], vm_ref=vm_ref) except exception.InstanceNotFound: # If the VM is not running then no need to update # the live xenstore pass @utils.synchronized('xenstore-' + instance['uuid']) def update_meta(): for key, change in diff.items(): key = self._sanitize_xenstore_key(key) location = 'vm-data/user-metadata/%s' % key process_change(location, change) update_meta() def _find_root_vdi_ref(self, vm_ref): """Find and return the root vdi ref for a VM.""" if not vm_ref: return None vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref) for vbd_uuid in vbd_refs: vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid) if vbd["userdevice"] == DEVICE_ROOT: return vbd["VDI"] raise exception.NotFound(_("Unable to find root VBD/VDI for VM")) def _destroy_vdis(self, instance, vm_ref): """Destroys all VDIs associated with a VM.""" LOG.debug(_("Destroying VDIs"), instance=instance) vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref) if not vdi_refs: return for vdi_ref in vdi_refs: try: vm_utils.destroy_vdi(self._session, vdi_ref) except volume_utils.StorageError as exc: LOG.error(exc) def _destroy_kernel_ramdisk(self, instance, vm_ref): """Three situations can occur: 1. We have neither a ramdisk nor a kernel, in which case we are a RAW image and can omit this step 2. We have one or the other, in which case, we should flag as an error 3. We have both, in which case we safely remove both the kernel and the ramdisk. 
""" instance_uuid = instance['uuid'] if not instance['kernel_id'] and not instance['ramdisk_id']: # 1. No kernel or ramdisk LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk " "deletion"), instance=instance) return if not (instance['kernel_id'] and instance['ramdisk_id']): # 2. We only have kernel xor ramdisk raise exception.InstanceUnacceptable(instance_id=instance_uuid, reason=_("instance has a kernel or ramdisk but not both")) # 3. We have both kernel and ramdisk (kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session, vm_ref) if kernel or ramdisk: vm_utils.destroy_kernel_ramdisk(self._session, instance, kernel, ramdisk) LOG.debug(_("kernel/ramdisk files removed"), instance=instance) def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref): """Destroy a rescue instance.""" # Shutdown Rescue VM state = vm_utils.get_power_state(self._session, rescue_vm_ref) if state != power_state.SHUTDOWN: self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref) # Destroy Rescue VDIs vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref) root_vdi_ref = self._find_root_vdi_ref(original_vm_ref) vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref] vm_utils.safe_destroy_vdis(self._session, vdi_refs) # Destroy Rescue VM self._session.call_xenapi("VM.destroy", rescue_vm_ref) def destroy(self, instance, network_info, block_device_info=None, destroy_disks=True): """Destroy VM instance. This is the method exposed by xenapi_conn.destroy(). The rest of the destroy_* methods are internal. """ LOG.info(_("Destroying VM"), instance=instance) # We don't use _get_vm_opaque_ref because the instance may # truly not exist because of a failure during build. A valid # vm_ref is checked correctly where necessary. vm_ref = vm_utils.lookup(self._session, instance['name']) rescue_vm_ref = vm_utils.lookup(self._session, "%s-rescue" % instance['name']) if rescue_vm_ref: self._destroy_rescue_instance(rescue_vm_ref, vm_ref) # NOTE(sirp): `block_device_info` is not used, information about which # volumes should be detached is determined by the # VBD.other_config['osvol'] attribute return self._destroy(instance, vm_ref, network_info=network_info, destroy_disks=destroy_disks) def _destroy(self, instance, vm_ref, network_info=None, destroy_disks=True): """Destroys VM instance by performing: 1. A shutdown 2. Destroying associated VDIs. 3. Destroying kernel and ramdisk files (if necessary). 4. Destroying that actual VM record. 
""" if vm_ref is None: LOG.warning(_("VM is not present, skipping destroy..."), instance=instance) return vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) if destroy_disks: self._volumeops.detach_all(vm_ref) self._destroy_vdis(instance, vm_ref) self._destroy_kernel_ramdisk(instance, vm_ref) vm_utils.destroy_vm(self._session, instance, vm_ref) self.unplug_vifs(instance, network_info) self.firewall_driver.unfilter_instance( instance, network_info=network_info) def pause(self, instance): """Pause VM instance.""" vm_ref = self._get_vm_opaque_ref(instance) self._session.call_xenapi('VM.pause', vm_ref) def unpause(self, instance): """Unpause VM instance.""" vm_ref = self._get_vm_opaque_ref(instance) self._session.call_xenapi('VM.unpause', vm_ref) def suspend(self, instance): """Suspend the specified instance.""" vm_ref = self._get_vm_opaque_ref(instance) self._acquire_bootlock(vm_ref) self._session.call_xenapi('VM.suspend', vm_ref) def resume(self, instance): """Resume the specified instance.""" vm_ref = self._get_vm_opaque_ref(instance) self._release_bootlock(vm_ref) self._session.call_xenapi('VM.resume', vm_ref, False, True) def rescue(self, context, instance, network_info, image_meta, rescue_password): """Rescue the specified instance. - shutdown the instance VM. - set 'bootlock' to prevent the instance from starting in rescue. - spawn a rescue VM (the vm name-label will be instance-N-rescue). """ rescue_name_label = '%s-rescue' % instance['name'] rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label) if rescue_vm_ref: raise RuntimeError(_("Instance is already in Rescue Mode: %s") % instance['name']) vm_ref = self._get_vm_opaque_ref(instance) vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) self._acquire_bootlock(vm_ref) self.spawn(context, instance, image_meta, [], rescue_password, network_info, name_label=rescue_name_label, rescue=True) def set_bootable(self, instance, is_bootable): """Set the ability to power on/off an instance.""" vm_ref = self._get_vm_opaque_ref(instance) if is_bootable: self._release_bootlock(vm_ref) else: self._acquire_bootlock(vm_ref) def unrescue(self, instance): """Unrescue the specified instance. - unplug the instance VM's disk from the rescue VM. - teardown the rescue VM. - release the bootlock to allow the instance VM to start. 
""" rescue_vm_ref = vm_utils.lookup(self._session, "%s-rescue" % instance['name']) if not rescue_vm_ref: raise exception.InstanceNotInRescueMode( instance_id=instance['uuid']) original_vm_ref = self._get_vm_opaque_ref(instance) self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref) self._release_bootlock(original_vm_ref) self._start(instance, original_vm_ref) def soft_delete(self, instance): """Soft delete the specified instance.""" try: vm_ref = self._get_vm_opaque_ref(instance) except exception.NotFound: LOG.warning(_("VM is not present, skipping soft delete..."), instance=instance) else: vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) self._acquire_bootlock(vm_ref) def restore(self, instance): """Restore the specified instance.""" vm_ref = self._get_vm_opaque_ref(instance) self._release_bootlock(vm_ref) self._start(instance, vm_ref) def power_off(self, instance): """Power off the specified instance.""" vm_ref = self._get_vm_opaque_ref(instance) vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) def power_on(self, instance): """Power on the specified instance.""" vm_ref = self._get_vm_opaque_ref(instance) self._start(instance, vm_ref) def _cancel_stale_tasks(self, timeout, task): """Cancel the given tasks that are older than the given timeout.""" task_refs = self._session.call_xenapi("task.get_by_name_label", task) for task_ref in task_refs: task_rec = self._session.call_xenapi("task.get_record", task_ref) task_created = timeutils.parse_strtime(task_rec["created"].value, "%Y%m%dT%H:%M:%SZ") if timeutils.is_older_than(task_created, timeout): self._session.call_xenapi("task.cancel", task_ref) def poll_rebooting_instances(self, timeout, instances): """Look for expirable rebooting instances. - issue a "hard" reboot to any instance that has been stuck in a reboot state for >= the given timeout """ # NOTE(jk0): All existing clean_reboot tasks must be cancelled before # we can kick off the hard_reboot tasks. self._cancel_stale_tasks(timeout, 'VM.clean_reboot') ctxt = nova_context.get_admin_context() instances_info = dict(instance_count=len(instances), timeout=timeout) if instances_info["instance_count"] > 0: LOG.info(_("Found %(instance_count)d hung reboots " "older than %(timeout)d seconds") % instances_info) for instance in instances: LOG.info(_("Automatically hard rebooting"), instance=instance) self.compute_api.reboot(ctxt, instance, "HARD") def get_info(self, instance, vm_ref=None): """Return data about VM instance.""" vm_ref = vm_ref or self._get_vm_opaque_ref(instance) return vm_utils.compile_info(self._session, vm_ref) def get_diagnostics(self, instance): """Return data about VM diagnostics.""" vm_ref = self._get_vm_opaque_ref(instance) vm_rec = self._session.call_xenapi("VM.get_record", vm_ref) return vm_utils.compile_diagnostics(vm_rec) def _get_vif_device_map(self, vm_rec): vif_map = {} for vif in [self._session.call_xenapi("VIF.get_record", vrec) for vrec in vm_rec['VIFs']]: vif_map[vif['device']] = vif['MAC'] return vif_map def get_all_bw_counters(self): """Return running bandwidth counter for each interface on each running VM. 
""" counters = vm_utils.fetch_bandwidth(self._session) bw = {} for vm_ref, vm_rec in vm_utils.list_vms(self._session): vif_map = self._get_vif_device_map(vm_rec) name = vm_rec['name_label'] if 'nova_uuid' not in vm_rec['other_config']: continue dom = vm_rec.get('domid') if dom is None or dom not in counters: continue vifs_bw = bw.setdefault(name, {}) for vif_num, vif_data in counters[dom].iteritems(): mac = vif_map[vif_num] vif_data['mac_address'] = mac vifs_bw[mac] = vif_data return bw def get_console_output(self, instance): """Return last few lines of instance console.""" dom_id = self._get_dom_id(instance, check_rescue=True) try: raw_console_data = self._session.call_plugin('console', 'get_console_log', {'dom_id': dom_id}) except self._session.XenAPI.Failure as exc: LOG.exception(exc) msg = _("Guest does not have a console available") raise exception.NovaException(msg) return zlib.decompress(base64.b64decode(raw_console_data)) def get_vnc_console(self, instance): """Return connection info for a vnc console.""" if instance.vm_state == vm_states.RESCUED: name = '%s-rescue' % instance.name vm_ref = vm_utils.lookup(self._session, name) if vm_ref is None: # The rescue instance might not be ready at this point. raise exception.InstanceNotReady(instance_id=instance.uuid) else: vm_ref = vm_utils.lookup(self._session, instance.name) if vm_ref is None: # The compute manager expects InstanceNotFound for this case. raise exception.InstanceNotFound(instance_id=instance.uuid) session_id = self._session.get_session_id() path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id) # NOTE: XS5.6sp2+ use http over port 80 for xenapi com return {'host': CONF.vncserver_proxyclient_address, 'port': 80, 'internal_access_path': path} def _vif_xenstore_data(self, vif): """convert a network info vif to injectable instance data.""" def get_ip(ip): if not ip: return None return ip['address'] def fixed_ip_dict(ip, subnet): if ip['version'] == 4: netmask = str(subnet.as_netaddr().netmask) else: netmask = subnet.as_netaddr()._prefixlen return {'ip': ip['address'], 'enabled': '1', 'netmask': netmask, 'gateway': get_ip(subnet['gateway'])} def convert_route(route): return {'route': str(netaddr.IPNetwork(route['cidr']).network), 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask), 'gateway': get_ip(route['gateway'])} network = vif['network'] v4_subnets = [subnet for subnet in network['subnets'] if subnet['version'] == 4] v6_subnets = [subnet for subnet in network['subnets'] if subnet['version'] == 6] # NOTE(tr3buchet): routes and DNS come from all subnets routes = [convert_route(route) for subnet in network['subnets'] for route in subnet['routes']] dns = [get_ip(ip) for subnet in network['subnets'] for ip in subnet['dns']] info_dict = {'label': network['label'], 'mac': vif['address']} if v4_subnets: # NOTE(tr3buchet): gateway and broadcast from first subnet # primary IP will be from first subnet # subnets are generally unordered :( info_dict['gateway'] = get_ip(v4_subnets[0]['gateway']) info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast) info_dict['ips'] = [fixed_ip_dict(ip, subnet) for subnet in v4_subnets for ip in subnet['ips']] if v6_subnets: # NOTE(tr3buchet): gateway from first subnet # primary IP will be from first subnet # subnets are generally unordered :( info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway']) info_dict['ip6s'] = [fixed_ip_dict(ip, subnet) for subnet in v6_subnets for ip in subnet['ips']] if routes: info_dict['routes'] = routes if dns: info_dict['dns'] = 
list(set(dns)) return info_dict def inject_network_info(self, instance, network_info, vm_ref=None): """Generate the network info and make calls to place it into the xenstore and the xenstore param list. vm_ref can be passed in because it will sometimes be different than what vm_utils.lookup(session, instance['name']) will find (ex: rescue) """ vm_ref = vm_ref or self._get_vm_opaque_ref(instance) LOG.debug(_("Injecting network info to xenstore"), instance=instance) @utils.synchronized('xenstore-' + instance['uuid']) def update_nwinfo(): for vif in network_info: xs_data = self._vif_xenstore_data(vif) location = ('vm-data/networking/%s' % vif['address'].replace(':', '')) self._add_to_param_xenstore(vm_ref, location, jsonutils.dumps(xs_data)) try: self._write_to_xenstore(instance, location, xs_data, vm_ref=vm_ref) except exception.InstanceNotFound: # If the VM is not running, no need to update the # live xenstore pass update_nwinfo() def _create_vifs(self, instance, vm_ref, network_info): """Creates vifs for an instance.""" LOG.debug(_("Creating vifs"), instance=instance) # this function raises if vm_ref is not a vm_opaque_ref self._session.call_xenapi("VM.get_domid", vm_ref) for device, vif in enumerate(network_info): vif_rec = self.vif_driver.plug(instance, vif, vm_ref=vm_ref, device=device) network_ref = vif_rec['network'] LOG.debug(_('Creating VIF for network %s'), network_ref, instance=instance) vif_ref = self._session.call_xenapi('VIF.create', vif_rec) LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'), {'vif_ref': vif_ref, 'network_ref': network_ref}, instance=instance) def plug_vifs(self, instance, network_info): """Set up VIF networking on the host.""" for device, vif in enumerate(network_info): self.vif_driver.plug(instance, vif, device=device) def unplug_vifs(self, instance, network_info): if network_info: for vif in network_info: self.vif_driver.unplug(instance, vif) def reset_network(self, instance, rescue=False): """Calls resetnetwork method in agent.""" if self.agent_enabled(instance): vm_ref = self._get_vm_opaque_ref(instance) agent = self._get_agent(instance, vm_ref) self._inject_hostname(instance, vm_ref, rescue) agent.resetnetwork() self._remove_hostname(instance, vm_ref) else: raise NotImplementedError() def _inject_hostname(self, instance, vm_ref, rescue): """Inject the hostname of the instance into the xenstore.""" hostname = instance['hostname'] if rescue: hostname = 'RESCUE-%s' % hostname if instance['os_type'] == "windows": # NOTE(jk0): Windows hostnames can only be <= 15 chars. hostname = hostname[:15] LOG.debug(_("Injecting hostname (%s) into xenstore"), hostname, instance=instance) @utils.synchronized('xenstore-' + instance['uuid']) def update_hostname(): self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname) update_hostname() def _remove_hostname(self, instance, vm_ref): LOG.debug(_("Removing hostname from xenstore"), instance=instance) @utils.synchronized('xenstore-' + instance['uuid']) def update_hostname(): self._remove_from_param_xenstore(vm_ref, 'vm-data/hostname') update_hostname() def _write_to_xenstore(self, instance, path, value, vm_ref=None): """Writes the passed value to the xenstore record for the given VM at the specified location. A XenAPIPlugin.PluginError will be raised if any error is encountered in the write process. 
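For example (hypothetical path and value): _write_to_xenstore(instance, 'vm-data/mykey', {'a': 1}) JSON-encodes the value and hands it to the xenstore.py plugin's write_record method through _make_plugin_call below.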
""" return self._make_plugin_call('xenstore.py', 'write_record', instance, vm_ref=vm_ref, path=path, value=jsonutils.dumps(value)) def _delete_from_xenstore(self, instance, path, vm_ref=None): """Deletes the value from the xenstore record for the given VM at the specified location. A XenAPIPlugin.PluginError will be raised if any error is encountered in the delete process. """ return self._make_plugin_call('xenstore.py', 'delete_record', instance, vm_ref=vm_ref, path=path) def _make_plugin_call(self, plugin, method, instance=None, vm_ref=None, **addl_args): """Abstracts out the process of calling a method of a xenapi plugin. Any errors raised by the plugin will in turn raise a RuntimeError here. """ args = {} if instance or vm_ref: args['dom_id'] = self._get_dom_id(instance, vm_ref) args.update(addl_args) try: return self._session.call_plugin(plugin, method, args) except self._session.XenAPI.Failure as e: err_msg = e.details[-1].splitlines()[-1] if 'TIMEOUT:' in err_msg: LOG.error(_('TIMEOUT: The call to %(method)s timed out. ' 'args=%(args)r'), {'method': method, 'args': args}, instance=instance) return {'returncode': 'timeout', 'message': err_msg} elif 'NOT IMPLEMENTED:' in err_msg: LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not' ' supported by the agent. args=%(args)r'), {'method': method, 'args': args}, instance=instance) return {'returncode': 'notimplemented', 'message': err_msg} else: LOG.error(_('The call to %(method)s returned an error: %(e)s. ' 'args=%(args)r'), {'method': method, 'args': args, 'e': e}, instance=instance) return {'returncode': 'error', 'message': err_msg} def _get_dom_id(self, instance=None, vm_ref=None, check_rescue=False): vm_ref = vm_ref or self._get_vm_opaque_ref(instance, check_rescue) domid = self._session.call_xenapi("VM.get_domid", vm_ref) if not domid or domid == -1: raise exception.InstanceNotFound(instance_id=instance['name']) return domid def _add_to_param_xenstore(self, vm_ref, key, val): """Takes a key/value pair and adds it to the xenstore parameter record for the given vm instance. If the key exists in xenstore, it is overwritten """ self._remove_from_param_xenstore(vm_ref, key) self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val) def _remove_from_param_xenstore(self, vm_ref, key): """Takes a single key and removes it from the xenstore parameter record data for the given VM. If the key doesn't exist, the request is ignored. 
""" self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key) def refresh_security_group_rules(self, security_group_id): """recreates security group rules for every instance.""" self.firewall_driver.refresh_security_group_rules(security_group_id) def refresh_security_group_members(self, security_group_id): """recreates security group rules for every instance.""" self.firewall_driver.refresh_security_group_members(security_group_id) def refresh_instance_security_rules(self, instance): """recreates security group rules for specified instance.""" self.firewall_driver.refresh_instance_security_rules(instance) def refresh_provider_fw_rules(self): self.firewall_driver.refresh_provider_fw_rules() def unfilter_instance(self, instance_ref, network_info): """Removes filters for each VIF of the specified instance.""" self.firewall_driver.unfilter_instance(instance_ref, network_info=network_info) def _get_host_uuid_from_aggregate(self, context, hostname): current_aggregate = aggregate_obj.AggregateList.get_by_host( context, CONF.host, key=pool_states.POOL_FLAG)[0] if not current_aggregate: raise exception.AggregateHostNotFound(host=CONF.host) try: return current_aggregate.metadata[hostname] except KeyError: reason = _('Destination host:%s must be in the same ' 'aggregate as the source server') % hostname raise exception.MigrationPreCheckError(reason=reason) def _ensure_host_in_aggregate(self, context, hostname): self._get_host_uuid_from_aggregate(context, hostname) def _get_host_opaque_ref(self, context, hostname): host_uuid = self._get_host_uuid_from_aggregate(context, hostname) return self._session.call_xenapi("host.get_by_uuid", host_uuid) def _migrate_receive(self, ctxt): destref = self._session.host_ref # Get the network to for migrate. # This is the one associated with the pif marked management. From cli: # uuid=`xe pif-list --minimal management=true` # xe pif-param-get param-name=network-uuid uuid=$uuid expr = 'field "management" = "true"' pifs = self._session.call_xenapi('PIF.get_all_records_where', expr) if len(pifs) != 1: msg = _('No suitable network for migrate') raise exception.MigrationPreCheckError(reason=msg) pifkey = pifs.keys()[0] if not (utils.is_valid_ipv4(pifs[pifkey]['IP']) or utils.is_valid_ipv6(pifs[pifkey]['IPv6'])): msg = (_('PIF %s does not contain IP address') % pifs[pifkey]['uuid']) raise exception.MigrationPreCheckError(reason=msg) nwref = pifs[pifs.keys()[0]]['network'] try: options = {} migrate_data = self._session.call_xenapi("host.migrate_receive", destref, nwref, options) except self._session.XenAPI.Failure as exc: LOG.exception(exc) msg = _('Migrate Receive failed') raise exception.MigrationPreCheckError(reason=msg) return migrate_data def _get_iscsi_srs(self, ctxt, instance_ref): vm_ref = self._get_vm_opaque_ref(instance_ref) vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref) iscsi_srs = [] for vbd_ref in vbd_refs: vdi_ref = self._session.call_xenapi("VBD.get_VDI", vbd_ref) # Check if it's on an iSCSI SR sr_ref = self._session.call_xenapi("VDI.get_SR", vdi_ref) if self._session.call_xenapi("SR.get_type", sr_ref) == 'iscsi': iscsi_srs.append(sr_ref) return iscsi_srs def check_can_live_migrate_destination(self, ctxt, instance_ref, block_migration=False, disk_over_commit=False): """Check if it is possible to execute live migration. 
:param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit """ dest_check_data = {} if block_migration: migrate_send_data = self._migrate_receive(ctxt) destination_sr_ref = vm_utils.safe_find_sr(self._session) dest_check_data.update( {"block_migration": block_migration, "migrate_data": {"migrate_send_data": migrate_send_data, "destination_sr_ref": destination_sr_ref}}) else: src = instance_ref['host'] self._ensure_host_in_aggregate(ctxt, src) # TODO(johngarbutt) we currently assume # instance is on a SR shared with other destination # block migration work will be able to resolve this return dest_check_data def _is_xsm_sr_check_relaxed(self): try: return self.cached_xsm_sr_relaxed except AttributeError: config_value = None try: config_value = self._make_plugin_call('config_file', 'get_val', key='relax-xsm-sr-check') except Exception as exc: LOG.exception(exc) self.cached_xsm_sr_relaxed = config_value == "true" return self.cached_xsm_sr_relaxed def check_can_live_migrate_source(self, ctxt, instance_ref, dest_check_data): """Check if it's possible to execute live migration on the source side. :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest_check_data: data returned by the check on the destination, includes block_migration flag """ if len(self._get_iscsi_srs(ctxt, instance_ref)) > 0: # XAPI must support the relaxed SR check for live migrating with # iSCSI VBDs if not self._is_xsm_sr_check_relaxed(): raise exception.MigrationError(_('XAPI supporting ' 'relax-xsm-sr-check=true required')) if 'migrate_data' in dest_check_data: vm_ref = self._get_vm_opaque_ref(instance_ref) migrate_data = dest_check_data['migrate_data'] try: self._call_live_migrate_command( "VM.assert_can_migrate", vm_ref, migrate_data) except self._session.XenAPI.Failure as exc: reason = exc.details[0] msg = _('assert_can_migrate failed because: %s') % reason LOG.debug(msg, exc_info=True) raise exception.MigrationPreCheckError(reason=msg) return dest_check_data def _generate_vdi_map(self, destination_sr_ref, vm_ref, sr_ref=None): """generate a vdi_map for _call_live_migrate_command.""" if sr_ref is None: sr_ref = vm_utils.safe_find_sr(self._session) vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session, vm_ref, sr_ref) return dict((vdi, destination_sr_ref) for vdi in vm_vdis) def _call_live_migrate_command(self, command_name, vm_ref, migrate_data): """unpack xapi specific parameters, and call a live migrate command.""" destination_sr_ref = migrate_data['destination_sr_ref'] migrate_send_data = migrate_data['migrate_send_data'] vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref) # Add destination SR refs for all of the VDIs that we created # as part of the pre migration callback if 'pre_live_migration_result' in migrate_data: pre_migrate_data = migrate_data['pre_live_migration_result'] sr_uuid_map = pre_migrate_data.get('sr_uuid_map', []) for sr_uuid in sr_uuid_map: # Source and destination SRs have the same UUID, so get the # reference for the local SR sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid) vdi_map.update( self._generate_vdi_map( sr_uuid_map[sr_uuid], vm_ref, sr_ref)) vif_map = {} options = {} self._session.call_xenapi(command_name, vm_ref, migrate_send_data, True, vdi_map, vif_map, options) def live_migrate(self, context, instance, destination_hostname, post_method, recover_method, 
block_migration, migrate_data=None): try: vm_ref = self._get_vm_opaque_ref(instance) if migrate_data is not None: (kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk( self._session, vm_ref) migrate_data['kernel-file'] = kernel migrate_data['ramdisk-file'] = ramdisk if block_migration: if not migrate_data: raise exception.InvalidParameterValue('Block Migration ' 'requires migrate data from destination') iscsi_srs = self._get_iscsi_srs(context, instance) try: self._call_live_migrate_command( "VM.migrate_send", vm_ref, migrate_data) except self._session.XenAPI.Failure as exc: LOG.exception(exc) raise exception.MigrationError(_('Migrate Send failed')) # Tidy up the iSCSI SRs for sr_ref in iscsi_srs: volume_utils.forget_sr(self._session, sr_ref) else: host_ref = self._get_host_opaque_ref(context, destination_hostname) self._session.call_xenapi("VM.pool_migrate", vm_ref, host_ref, {"live": "true"}) post_method(context, instance, destination_hostname, block_migration, migrate_data) except Exception: with excutils.save_and_reraise_exception(): recover_method(context, instance, destination_hostname, block_migration) def post_live_migration(self, context, instance, migrate_data=None): if migrate_data is not None: vm_utils.destroy_kernel_ramdisk(self._session, instance, migrate_data.get('kernel-file'), migrate_data.get('ramdisk-file')) def post_live_migration_at_destination(self, context, instance, network_info, block_migration, block_device_info): # FIXME(johngarbutt): we should block all traffic until we have # applied security groups, however this requires changes to XenServer self._prepare_instance_filter(instance, network_info) self.firewall_driver.apply_instance_filter(instance, network_info) vm_utils.create_kernel_and_ramdisk(context, self._session, instance, instance['name']) # NOTE(johngarbutt) workaround XenServer bug CA-98606 vm_ref = self._get_vm_opaque_ref(instance) vm_utils.strip_base_mirror_from_vdis(self._session, vm_ref) def get_per_instance_usage(self): """Get usage info about each active instance.""" usage = {} def _is_active(vm_rec): power_state = vm_rec['power_state'].lower() return power_state in ['running', 'paused'] def _get_uuid(vm_rec): other_config = vm_rec['other_config'] return other_config.get('nova_uuid', None) for vm_ref, vm_rec in vm_utils.list_vms(self._session): uuid = _get_uuid(vm_rec) if _is_active(vm_rec) and uuid is not None: memory_mb = int(vm_rec['memory_static_max']) / units.Mi usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid} return usage def attach_block_device_volumes(self, block_device_info): sr_uuid_map = {} try: if block_device_info is not None: for block_device_map in block_device_info[ 'block_device_mapping']: sr_uuid, _ = self._volumeops.attach_volume( block_device_map['connection_info'], None, block_device_map['mount_device'], hotplug=False) sr_ref = self._session.call_xenapi('SR.get_by_uuid', sr_uuid) sr_uuid_map[sr_uuid] = sr_ref except Exception: with excutils.save_and_reraise_exception(): # Disconnect the volumes we just connected for sr in sr_uuid_map: volume_utils.forget_sr(self._session, sr_uuid_map[sr_ref]) return sr_uuid_map nova-2014.1.5/nova/virt/xenapi/__init__.py0000664000567000056700000000147712540642532021407 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI ================================================================== """ from nova.virt.xenapi import driver XenAPIDriver = driver.XenAPIDriver nova-2014.1.5/nova/virt/xenapi/volumeops.py0000664000567000056700000002070612540642544021700 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for Storage-related functions (attach, detach, etc). """ from nova import exception from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.xenapi import vm_utils from nova.virt.xenapi import volume_utils LOG = logging.getLogger(__name__) class VolumeOps(object): """Management class for Volume-related tasks.""" def __init__(self, session): self._session = session def attach_volume(self, connection_info, instance_name, mountpoint, hotplug=True): """Attach volume storage to VM instance.""" # NOTE: No Resource Pool concept so far LOG.debug(_('Attach_volume: %(connection_info)s, %(instance_name)s,' ' %(mountpoint)s'), {'connection_info': connection_info, 'instance_name': instance_name, 'mountpoint': mountpoint}) dev_number = volume_utils.get_device_number(mountpoint) vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name) sr_uuid, vdi_uuid = self._connect_volume(connection_info, dev_number, instance_name, vm_ref, hotplug=hotplug) LOG.info(_('Mountpoint %(mountpoint)s attached to' ' instance %(instance_name)s'), {'instance_name': instance_name, 'mountpoint': mountpoint}) return (sr_uuid, vdi_uuid) def connect_volume(self, connection_info): """Attach volume storage to the hypervisor without attaching to a VM Used to attach just the SR - e.g.
during live migration """ # NOTE: No Resource Pool concept so far LOG.debug(_("Connect_volume: %s"), connection_info) sr_uuid, vdi_uuid = self._connect_volume(connection_info, None, None, None, False) return (sr_uuid, vdi_uuid) def _connect_volume(self, connection_info, dev_number=None, instance_name=None, vm_ref=None, hotplug=True): driver_type = connection_info['driver_volume_type'] if driver_type not in ['iscsi', 'xensm']: raise exception.VolumeDriverNotFound(driver_type=driver_type) connection_data = connection_info['data'] sr_uuid, sr_label, sr_params = volume_utils.parse_sr_info( connection_data, 'Disk-for:%s' % instance_name) # Introduce SR if not already present sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid) if not sr_ref: sr_ref = volume_utils.introduce_sr( self._session, sr_uuid, sr_label, sr_params) try: # Introduce VDI if 'vdi_uuid' in connection_data: vdi_ref = volume_utils.introduce_vdi( self._session, sr_ref, vdi_uuid=connection_data['vdi_uuid']) elif 'target_lun' in connection_data: vdi_ref = volume_utils.introduce_vdi( self._session, sr_ref, target_lun=connection_data['target_lun']) else: # NOTE(sirp): This will introduce the first VDI in the SR vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref) # Attach if vm_ref: vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref, dev_number, bootable=False, osvol=True) running = not vm_utils.is_vm_shutdown(self._session, vm_ref) if hotplug and running: self._session.VBD.plug(vbd_ref, vm_ref) vdi_uuid = self._session.call_xenapi("VDI.get_uuid", vdi_ref) return (sr_uuid, vdi_uuid) except Exception: with excutils.save_and_reraise_exception(): # NOTE(sirp): Forgetting the SR will have the effect of # cleaning up the VDI and VBD records, so no need to handle # that explicitly. volume_utils.forget_sr(self._session, sr_ref) def detach_volume(self, connection_info, instance_name, mountpoint): """Detach volume storage from VM instance.""" LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s"), {'instance_name': instance_name, 'mountpoint': mountpoint}) device_number = volume_utils.get_device_number(mountpoint) vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name) try: vbd_ref = vm_utils.find_vbd_by_number( self._session, vm_ref, device_number) except volume_utils.StorageError: # NOTE(sirp): If we don't find the VBD then it must have been # detached previously.
LOG.warn(_('Skipping detach because VBD for %s was' ' not found'), instance_name) return # Unplug VBD if we're NOT shutdown unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref) self._detach_vbd(vbd_ref, unplug, vm_ref) LOG.info(_('Mountpoint %(mountpoint)s detached from instance' ' %(instance_name)s'), {'instance_name': instance_name, 'mountpoint': mountpoint}) def _get_all_volume_vbd_refs(self, vm_ref): """Return VBD refs for all Nova/Cinder volumes.""" vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref) for vbd_ref in vbd_refs: other_config = self._session.call_xenapi( "VBD.get_other_config", vbd_ref) if other_config.get('osvol'): yield vbd_ref def _detach_vbd(self, vbd_ref, unplug, vm_ref): if unplug: vm_utils.unplug_vbd(self._session, vbd_ref, vm_ref) sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref) vm_utils.destroy_vbd(self._session, vbd_ref) # Forget SR only if not in use volume_utils.purge_sr(self._session, sr_ref) def detach_all(self, vm_ref): """Detach any external nova/cinder volumes and purge the SRs.""" # Generally speaking, detach_all will be called with VM already # shutdown; however if it's still running, we can still perform the # operation by unplugging the VBD first. unplug = not vm_utils.is_vm_shutdown(self._session, vm_ref) vbd_refs = self._get_all_volume_vbd_refs(vm_ref) for vbd_ref in vbd_refs: self._detach_vbd(vbd_ref, unplug, vm_ref) def find_bad_volumes(self, vm_ref): """Find any volumes with their connection severed. Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not work when a VBD is present that points to a non-working volume. To work around this, we scan for non-working volumes and detach them before retrying a failed operation. """ bad_devices = [] vbd_refs = self._get_all_volume_vbd_refs(vm_ref) for vbd_ref in vbd_refs: sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref) try: # TODO(sirp): bug1152401 This relies on a 120 sec timeout # within XenServer, update this to fail-fast when this is fixed # upstream self._session.call_xenapi("SR.scan", sr_ref) except self._session.XenAPI.Failure as exc: if exc.details[0] == 'SR_BACKEND_FAILURE_40': vbd_rec = self._session.call_xenapi( "VBD.get_record", vbd_ref) bad_devices.append('/dev/%s' % vbd_rec['device']) else: raise return bad_devices nova-2014.1.5/nova/virt/xenapi/host.py0000664000567000056700000003410512540642544020622 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Citrix Systems, Inc. # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for host-related functions (start, reboot, etc).
""" import re from nova.compute import task_states from nova.compute import vm_states from nova import conductor from nova import context from nova import exception from nova.objects import aggregate as aggregate_obj from nova.objects import instance as instance_obj from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.pci import pci_whitelist from nova.virt.xenapi import pool_states from nova.virt.xenapi import vm_utils LOG = logging.getLogger(__name__) class Host(object): """Implements host related operations.""" def __init__(self, session, virtapi): self._session = session self._virtapi = virtapi self._conductor_api = conductor.API() def host_power_action(self, _host, action): """Reboots or shuts down the host.""" args = {"action": jsonutils.dumps(action)} methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"} response = call_xenhost(self._session, methods[action], args) return response.get("power_action", response) def host_maintenance_mode(self, host, mode): """Start/Stop host maintenance window. On start, it triggers guest VMs evacuation. """ if not mode: return 'off_maintenance' host_list = [host_ref for host_ref in self._session.host.get_all() if host_ref != self._session.host_ref] migrations_counter = vm_counter = 0 ctxt = context.get_admin_context() for vm_ref, vm_rec in vm_utils.list_vms(self._session): for host_ref in host_list: try: # Ensure only guest instances are migrated uuid = vm_rec['other_config'].get('nova_uuid') if not uuid: name = vm_rec['name_label'] uuid = _uuid_find(ctxt, host, name) if not uuid: LOG.info(_('Instance %(name)s running on %(host)s' ' could not be found in the database:' ' assuming it is a worker VM and skip' ' ping migration to a new host'), {'name': name, 'host': host}) continue instance = instance_obj.Instance.get_by_uuid(ctxt, uuid) vm_counter = vm_counter + 1 aggregate = aggregate_obj.AggregateList.get_by_host( ctxt, host, key=pool_states.POOL_FLAG) if not aggregate: msg = _('Aggregate for host %(host)s count not be' ' found.') % dict(host=host) raise exception.NotFound(msg) dest = _host_find(ctxt, self._session, aggregate[0], host_ref) instance.host = dest instance.task_state = task_states.MIGRATING instance.save() self._session.VM.pool_migrate(vm_ref, host_ref, {"live": "true"}) migrations_counter = migrations_counter + 1 instance.vm_state = vm_states.ACTIVE instance.save() break except self._session.XenAPI.Failure: LOG.exception(_('Unable to migrate VM %(vm_ref)s ' 'from %(host)s'), {'vm_ref': vm_ref, 'host': host}) instance.host = host instance.vm_state = vm_states.ACTIVE instance.save() if vm_counter == migrations_counter: return 'on_maintenance' else: raise exception.NoValidHost(reason='Unable to find suitable ' 'host for VMs evacuation') def set_host_enabled(self, host, enabled): """Sets the specified host's ability to accept new instances.""" # Since capabilities are gone, use service table to disable a node # in scheduler status = {'disabled': not enabled, 'disabled_reason': 'set by xenapi host_state' } cntxt = context.get_admin_context() service = self._conductor_api.service_get_by_args( cntxt, host, 'nova-compute') self._conductor_api.service_update( cntxt, service, status) args = {"enabled": jsonutils.dumps(enabled)} response = call_xenhost(self._session, "set_host_enabled", args) return response.get("status", response) def get_host_uptime(self, _host): """Returns the result of calling "uptime" on the target host.""" response = 
call_xenhost(self._session, "host_uptime", {}) return response.get("uptime", response) class HostState(object): """Manages information about the XenServer host this compute node is running on. """ def __init__(self, session): super(HostState, self).__init__() self._session = session self._stats = {} self._pci_device_filter = pci_whitelist.get_pci_devices_filter() self.update_status() def _get_passthrough_devices(self): """Get a list pci devices that are available for pci passthtough. We use a plugin to get the output of the lspci command runs on dom0. From this list we will extract pci devices that are using the pciback kernel driver. Then we compare this list to the pci whitelist to get a new list of pci devices that can be used for pci passthrough. :returns: a list of pci devices available for pci passthrough. """ def _compile_hex(pattern): """Return a compiled regular expression pattern into which we have replaced occurrences of hex by [\da-fA-F]. """ return re.compile(pattern.replace("hex", r"[\da-fA-F]")) def _parse_pci_device_string(dev_string): """Exctract information from the device string about the slot, the vendor and the product ID. The string is as follow: "Slot:\tBDF\nClass:\txxxx\nVendor:\txxxx\nDevice:\txxxx\n..." Return a dictionary with informations about the device. """ slot_regex = _compile_hex(r"Slot:\t" r"((?:hex{4}:)?" # Domain: (optional) r"hex{2}:" # Bus: r"hex{2}\." # Device. r"hex{1})") # Function vendor_regex = _compile_hex(r"\nVendor:\t(hex+)") product_regex = _compile_hex(r"\nDevice:\t(hex+)") slot_id = slot_regex.findall(dev_string) vendor_id = vendor_regex.findall(dev_string) product_id = product_regex.findall(dev_string) if not slot_id or not vendor_id or not product_id: raise exception.NovaException( _("Failed to parse information about" " a pci device for passthrough")) type_pci = self._session.call_plugin_serialized( 'xenhost', 'get_pci_type', slot_id[0]) return {'label': '_'.join(['label', vendor_id[0], product_id[0]]), 'vendor_id': vendor_id[0], 'product_id': product_id[0], 'address': slot_id[0], 'dev_id': '_'.join(['pci', slot_id[0]]), 'dev_type': type_pci, 'status': 'available'} # Devices are separated by a blank line. That is why we # use "\n\n" as separator. lspci_out = self._session.call_plugin_serialized( 'xenhost', 'get_pci_device_details') pci_list = lspci_out.split("\n\n") # For each device of the list, check if it uses the pciback # kernel driver and if it does, get informations and add it # to the list of passthrough_devices. Ignore it if the driver # is not pciback. passthrough_devices = [] for dev_string_info in pci_list: if "Driver:\tpciback" in dev_string_info: new_dev = _parse_pci_device_string(dev_string_info) if self._pci_device_filter.device_assignable(new_dev): passthrough_devices.append(new_dev) return passthrough_devices def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run the update first. """ if refresh or not self._stats: self.update_status() return self._stats def update_status(self): """Since under Xenserver, a compute node runs on a given host, we can get host status information using xenapi. 
""" LOG.debug(_("Updating host stats")) data = call_xenhost(self._session, "host_data", {}) if data: sr_ref = vm_utils.scan_default_sr(self._session) sr_rec = self._session.SR.get_record(sr_ref) total = int(sr_rec["physical_size"]) used = int(sr_rec["physical_utilisation"]) data["disk_total"] = total data["disk_used"] = used data["disk_available"] = total - used data["supported_instances"] = to_supported_instances( data.get("host_capabilities") ) host_memory = data.get('host_memory', None) if host_memory: data["host_memory_total"] = host_memory.get('total', 0) data["host_memory_overhead"] = host_memory.get('overhead', 0) data["host_memory_free"] = host_memory.get('free', 0) data["host_memory_free_computed"] = host_memory.get( 'free-computed', 0) del data['host_memory'] if (data['host_hostname'] != self._stats.get('host_hostname', data['host_hostname'])): LOG.error(_('Hostname has changed from %(old)s ' 'to %(new)s. A restart is required to take effect.' ) % {'old': self._stats['host_hostname'], 'new': data['host_hostname']}) data['host_hostname'] = self._stats['host_hostname'] data['hypervisor_hostname'] = data['host_hostname'] vcpus_used = 0 for vm_ref, vm_rec in vm_utils.list_vms(self._session): vcpus_used = vcpus_used + int(vm_rec['VCPUs_max']) data['vcpus_used'] = vcpus_used data['pci_passthrough_devices'] = self._get_passthrough_devices() self._stats = data def to_supported_instances(host_capabilities): if not host_capabilities: return [] result = [] for capability in host_capabilities: try: ostype, _version, arch = capability.split("-") result.append((arch, 'xapi', ostype)) except ValueError: LOG.warning( _("Failed to extract instance support from %s"), capability) return result def call_xenhost(session, method, arg_dict): """There will be several methods that will need this general handling for interacting with the xenhost plugin, so this abstracts out that behavior. """ # Create a task ID as something that won't match any instance ID try: result = session.call_plugin('xenhost', method, args=arg_dict) if not result: return '' return jsonutils.loads(result) except ValueError: LOG.exception(_("Unable to get updated status")) return None except session.XenAPI.Failure as e: LOG.error(_("The call to %(method)s returned " "an error: %(e)s."), {'method': method, 'e': e}) return e.details[1] def _uuid_find(context, host, name_label): """Return instance uuid by name_label.""" for i in instance_obj.InstanceList.get_by_host(context, host): if i.name == name_label: return i.uuid return None def _host_find(context, session, src_aggregate, host_ref): """Return the host from the xenapi host reference. :param src_aggregate: the aggregate that the compute host being put in maintenance (source of VMs) belongs to :param host_ref: the hypervisor host reference (destination of VMs) :return: the compute host that manages host_ref """ # NOTE: this would be a lot simpler if nova-compute stored # CONF.host in the XenServer host's other-config map. # TODO(armando-migliaccio): improve according the note above uuid = session.host.get_uuid(host_ref) for compute_host, host_uuid in src_aggregate.metadetails.iteritems(): if host_uuid == uuid: return compute_host raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found ' 'from aggregate metadata: %(metadata)s.' 
% {'host_uuid': uuid, 'metadata': src_aggregate.metadetails}) nova-2014.1.5/nova/virt/xenapi/pool_states.py0000664000567000056700000000417012540642532022175 0ustar jenkinsjenkins00000000000000# Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Possible states for xen resource pools. A pool may be 'created', in which case the admin has triggered its creation, but the underlying hypervisor pool has not actually been set up yet. A pool may be 'changing', meaning that the underlying hypervisor pool is being set up. A pool may be 'active', in which case the underlying hypervisor pool is up and running. A pool may be 'dismissed' when it has no hosts and it has been deleted. A pool may be in 'error' in all other cases. A 'created' pool becomes 'changing' during the first request of adding a host. During a 'changing' status no other requests will be accepted; this is to allow the hypervisor layer to instantiate the underlying pool without any potential race condition that may occur in master/slave-based configurations. The pool goes into the 'active' state when the underlying pool has been correctly instantiated. All other operations (e.g. add/remove hosts) that succeed will keep the pool in the 'active' state. If a number of consecutive requests fail, an 'active' pool goes into an 'error' state. To recover from such a state, admin intervention is required. Currently an error state is irreversible, that is, in order to recover from it a pool must be deleted. """ CREATED = 'created' CHANGING = 'changing' ACTIVE = 'active' ERROR = 'error' DISMISSED = 'dismissed' # Metadata keys KEY = 'operational_state' POOL_FLAG = 'hypervisor_pool' def is_hv_pool(metadata): """Checks if aggregate is a hypervisor_pool.""" return POOL_FLAG in metadata.keys() nova-2014.1.5/nova/virt/xenapi/agent.py0000664000567000056700000004407312540642544020750 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
import base64 import binascii from distutils import version import os import sys import time import uuid from oslo.config import cfg from nova.api.metadata import password from nova.compute import utils as compute_utils from nova import conductor from nova import context from nova import crypto from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import strutils from nova import utils USE_AGENT_KEY = "xenapi_use_agent" USE_AGENT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + USE_AGENT_KEY SKIP_SSH_KEY = "xenapi_skip_agent_inject_ssh" SKIP_SSH_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + SKIP_SSH_KEY SKIP_FILES_AT_BOOT_KEY = "xenapi_skip_agent_inject_files_at_boot" SKIP_FILES_AT_BOOT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX \ + SKIP_FILES_AT_BOOT_KEY LOG = logging.getLogger(__name__) xenapi_agent_opts = [ cfg.IntOpt('agent_timeout', default=30, deprecated_name='agent_timeout', deprecated_group='DEFAULT', help='Number of seconds to wait for agent reply'), cfg.IntOpt('agent_version_timeout', default=300, deprecated_name='agent_version_timeout', deprecated_group='DEFAULT', help='Number of seconds to wait for agent ' 'to be fully operational'), cfg.IntOpt('agent_resetnetwork_timeout', deprecated_name='agent_resetnetwork_timeout', deprecated_group='DEFAULT', default=60, help='Number of seconds to wait for agent reply ' 'to resetnetwork request'), cfg.StrOpt('agent_path', default='usr/sbin/xe-update-networking', deprecated_name='xenapi_agent_path', deprecated_group='DEFAULT', help='Specifies the path in which the XenAPI guest agent ' 'should be located. If the agent is present, network ' 'configuration is not injected into the image. ' 'Used if compute_driver=xenapi.XenAPIDriver and ' 'flat_injected=True'), cfg.BoolOpt('disable_agent', default=False, deprecated_name='xenapi_disable_agent', deprecated_group='DEFAULT', help='Disables the use of the XenAPI agent in any image ' 'regardless of what image properties are present.'), cfg.BoolOpt('use_agent_default', default=False, deprecated_name='xenapi_use_agent_default', deprecated_group='DEFAULT', help='Determines if the XenAPI agent should be used when ' 'the image used does not contain a hint to declare if ' 'the agent is present or not. ' 'The hint is a glance property "' + USE_AGENT_KEY + '" ' 'that has the value "True" or "False". ' 'Note that waiting for the agent when it is not present ' 'will significantly increase server boot times.'), ] CONF = cfg.CONF # xenapi_agent options in the DEFAULT group were deprecated in Icehouse CONF.register_opts(xenapi_agent_opts, 'xenserver') def _call_agent(session, instance, vm_ref, method, addl_args=None, timeout=None, success_codes=None): """Abstracts out the interaction with the agent xenapi plugin.""" if addl_args is None: addl_args = {} if timeout is None: timeout = CONF.xenserver.agent_timeout if success_codes is None: success_codes = ['0'] # always fetch domid because VM may have rebooted dom_id = session.VM.get_domid(vm_ref) args = { 'id': str(uuid.uuid4()), 'dom_id': str(dom_id), 'timeout': str(timeout), } args.update(addl_args) try: ret = session.call_plugin('agent', method, args) except session.XenAPI.Failure as e: err_msg = e.details[-1].splitlines()[-1] if 'TIMEOUT:' in err_msg: LOG.error(_('TIMEOUT: The call to %(method)s timed out. 
' 'args=%(args)r'), {'method': method, 'args': args}, instance=instance) raise exception.AgentTimeout(method=method) elif 'NOT IMPLEMENTED:' in err_msg: LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not ' 'supported by the agent. args=%(args)r'), {'method': method, 'args': args}, instance=instance) raise exception.AgentNotImplemented(method=method) else: LOG.error(_('The call to %(method)s returned an error: %(e)s. ' 'args=%(args)r'), {'method': method, 'args': args, 'e': e}, instance=instance) raise exception.AgentError(method=method) if not isinstance(ret, dict): try: ret = jsonutils.loads(ret) except TypeError: LOG.error(_('The agent call to %(method)s returned an invalid ' 'response: %(ret)r. args=%(args)r'), {'method': method, 'ret': ret, 'args': args}, instance=instance) raise exception.AgentError(method=method) if ret['returncode'] not in success_codes: LOG.error(_('The agent call to %(method)s returned an ' 'an error: %(ret)r. args=%(args)r'), {'method': method, 'ret': ret, 'args': args}, instance=instance) raise exception.AgentError(method=method) LOG.debug(_('The agent call to %(method)s was successful: ' '%(ret)r. args=%(args)r'), {'method': method, 'ret': ret, 'args': args}, instance=instance) # Some old versions of the Windows agent have a trailing \\r\\n # (ie CRLF escaped) for some reason. Strip that off. return ret['message'].replace('\\r\\n', '') def is_upgrade_required(current_version, available_version): # NOTE(johngarbutt): agent version numbers are four part, # so we need to use the loose version to compare them current = version.LooseVersion(current_version) available = version.LooseVersion(available_version) return available > current class XenAPIBasedAgent(object): def __init__(self, session, virtapi, instance, vm_ref): self.session = session self.virtapi = virtapi self.instance = instance self.vm_ref = vm_ref def _add_instance_fault(self, error, exc_info): LOG.warning(_("Ignoring error while configuring instance with " "agent: %s") % error, instance=self.instance, exc_info=True) try: ctxt = context.get_admin_context() capi = conductor.API() compute_utils.add_instance_fault_from_exc( ctxt, capi, self.instance, error, exc_info=exc_info) except Exception: pass def _call_agent(self, method, addl_args=None, timeout=None, success_codes=None, ignore_errors=True): try: return _call_agent(self.session, self.instance, self.vm_ref, method, addl_args, timeout, success_codes) except exception.AgentError as error: if ignore_errors: self._add_instance_fault(error, sys.exc_info()) else: raise def get_version(self): LOG.debug(_('Querying agent version'), instance=self.instance) # The agent can be slow to start for a variety of reasons. On Windows, # it will generally perform a setup process on first boot that can # take a couple of minutes and then reboot. On Linux, the system can # also take a while to boot. 
expiration = time.time() + CONF.xenserver.agent_version_timeout while True: try: # NOTE(johngarbutt): we can't use the xapi plugin # timeout, because the domid may change when # the server is rebooted return self._call_agent('version', ignore_errors=False) except exception.AgentError as error: if time.time() > expiration: self._add_instance_fault(error, sys.exc_info()) return def _get_expected_build(self): ctxt = context.get_admin_context() agent_build = self.virtapi.agent_build_get_by_triple( ctxt, 'xen', self.instance['os_type'], self.instance['architecture']) if agent_build: LOG.debug(_('Latest agent build for %(hypervisor)s/%(os)s' '/%(architecture)s is %(version)s') % agent_build) else: LOG.debug(_('No agent build found for %(hypervisor)s/%(os)s' '/%(architecture)s') % { 'hypervisor': 'xen', 'os': self.instance['os_type'], 'architecture': self.instance['architecture']}) return agent_build def update_if_needed(self, version): agent_build = self._get_expected_build() if version and agent_build and \ is_upgrade_required(version, agent_build['version']): LOG.debug(_('Updating agent to %s'), agent_build['version'], instance=self.instance) self._perform_update(agent_build) else: LOG.debug(_('Skipping agent update.'), instance=self.instance) def _perform_update(self, agent_build): args = {'url': agent_build['url'], 'md5sum': agent_build['md5hash']} try: self._call_agent('agentupdate', args) except exception.AgentError as exc: # Silently fail for agent upgrades LOG.warning(_("Unable to update the agent due " "to: %(exc)s") % dict(exc=exc), instance=self.instance) def _exchange_key_with_agent(self): dh = SimpleDH() args = {'pub': str(dh.get_public())} resp = self._call_agent('key_init', args, success_codes=['D0'], ignore_errors=False) agent_pub = int(resp) dh.compute_shared(agent_pub) return dh def _save_instance_password_if_sshkey_present(self, new_pass): sshkey = self.instance.get('key_data') if sshkey and sshkey.startswith("ssh-rsa"): ctxt = context.get_admin_context() enc = crypto.ssh_encrypt_text(sshkey, new_pass) sys_meta = utils.instance_sys_meta(self.instance) sys_meta.update(password.convert_password(ctxt, base64.b64encode(enc))) self.virtapi.instance_update(ctxt, self.instance['uuid'], {'system_metadata': sys_meta}) def set_admin_password(self, new_pass): """Set the root/admin password on the VM instance. This is done via an agent running on the VM. Communication between nova and the agent is done via writing xenstore records. Since communication is done over the XenAPI RPC calls, we need to encrypt the password. We're using a simple Diffie-Hellman class instead of a more advanced library (such as M2Crypto) for compatibility with the agent code. """ LOG.debug(_('Setting admin password'), instance=self.instance) try: dh = self._exchange_key_with_agent() except exception.AgentError as error: self._add_instance_fault(error, sys.exc_info()) return # Some old versions of Linux and Windows agent expect trailing \n # on password to work correctly. 
enc_pass = dh.encrypt(new_pass + '\n') args = {'enc_pass': enc_pass} self._call_agent('password', args) self._save_instance_password_if_sshkey_present(new_pass) def inject_ssh_key(self): sshkey = self.instance.get('key_data') if not sshkey: return if self.instance['os_type'] == 'windows': LOG.debug(_("Skipping setting of ssh key for Windows."), instance=self.instance) return if self._skip_ssh_key_inject(): LOG.debug(_("Skipping agent ssh key injection for this image."), instance=self.instance) return sshkey = str(sshkey) keyfile = '/root/.ssh/authorized_keys' key_data = ''.join([ '\n', '# The following ssh key was injected by Nova', '\n', sshkey.strip(), '\n', ]) return self.inject_file(keyfile, key_data) def inject_files(self, injected_files): if self._skip_inject_files_at_boot(): LOG.debug(_("Skipping agent file injection for this image."), instance=self.instance) else: for path, contents in injected_files: self.inject_file(path, contents) def inject_file(self, path, contents): LOG.debug(_('Injecting file path: %r'), path, instance=self.instance) # Files/paths must be base64-encoded for transmission to agent b64_path = base64.b64encode(path) b64_contents = base64.b64encode(contents) args = {'b64_path': b64_path, 'b64_contents': b64_contents} return self._call_agent('inject_file', args) def resetnetwork(self): LOG.debug(_('Resetting network'), instance=self.instance) #NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success return self._call_agent('resetnetwork', timeout=CONF.xenserver.agent_resetnetwork_timeout, success_codes=['0', '500']) def _skip_ssh_key_inject(self): return self._get_sys_meta_key(SKIP_SSH_SM_KEY) def _skip_inject_files_at_boot(self): return self._get_sys_meta_key(SKIP_FILES_AT_BOOT_SM_KEY) def _get_sys_meta_key(self, key): sys_meta = utils.instance_sys_meta(self.instance) raw_value = sys_meta.get(key, 'False') return strutils.bool_from_string(raw_value, strict=False) def find_guest_agent(base_dir): """Tries to locate a guest agent at the path specified by agent_rel_path. """ if CONF.xenserver.disable_agent: return False agent_rel_path = CONF.xenserver.agent_path agent_path = os.path.join(base_dir, agent_rel_path) if os.path.isfile(agent_path): # The presence of the guest agent # file indicates that this instance can # reconfigure the network from xenstore data, # so manipulation of files in /etc is not # required LOG.info(_('XenServer tools installed in this ' 'image are capable of network injection. ' 'Networking files will not be ' 'manipulated')) return True xe_daemon_filename = os.path.join(base_dir, 'usr', 'sbin', 'xe-daemon') if os.path.isfile(xe_daemon_filename): LOG.info(_('XenServer tools are present ' 'in this image but are not capable ' 'of network injection')) else: LOG.info(_('XenServer tools are not ' 'installed in this image')) return False def should_use_agent(instance): sys_meta = utils.instance_sys_meta(instance) if USE_AGENT_SM_KEY not in sys_meta: return CONF.xenserver.use_agent_default else: use_agent_raw = sys_meta[USE_AGENT_SM_KEY] try: return strutils.bool_from_string(use_agent_raw, strict=True) except ValueError: LOG.warn(_("Invalid 'agent_present' value. " "Falling back to the default."), instance=instance) return CONF.xenserver.use_agent_default class SimpleDH(object): """This class wraps all the functionality needed to implement basic Diffie-Hellman-Merkle key exchange in Python. It features intelligent defaults for the prime and base numbers needed for the calculation, while allowing you to supply your own. 
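A minimal illustration of the exchange (toy usage only; in nova the
peer is the in-guest agent rather than a second SimpleDH):

    alice = SimpleDH()
    bob = SimpleDH()
    alice.compute_shared(bob.get_public())
    bob.compute_shared(alice.get_public())
    # both sides now hold the same shared secret, which is used as
    # the pass-phrase for the openssl commands below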
It requires that the openssl binary be installed on the system on which this is run, as it uses that to handle the encryption and decryption. If openssl is not available, a RuntimeError will be raised. """ def __init__(self): self._prime = 162259276829213363391578010288127 self._base = 5 self._public = None self._shared = None self.generate_private() def generate_private(self): self._private = int(binascii.hexlify(os.urandom(10)), 16) return self._private def get_public(self): self._public = pow(self._base, self._private, self._prime) return self._public def compute_shared(self, other): self._shared = pow(other, self._private, self._prime) return self._shared def _run_ssl(self, text, decrypt=False): cmd = ['openssl', 'aes-128-cbc', '-A', '-a', '-pass', 'pass:%s' % self._shared, '-nosalt'] if decrypt: cmd.append('-d') out, err = utils.execute(*cmd, process_input=text) if err: raise RuntimeError(_('OpenSSL error: %s') % err) return out def encrypt(self, text): return self._run_ssl(text).strip('\n') def decrypt(self, text): return self._run_ssl(text, decrypt=True) nova-2014.1.5/nova/virt/xenapi/vif.py0000664000567000056700000001511112540642544020425 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 Citrix Systems, Inc. # Copyright 2011 OpenStack Foundation # Copyright (C) 2011 Nicira, Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""VIF drivers for XenAPI.""" from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.virt.xenapi import network_utils from nova.virt.xenapi import vm_utils xenapi_ovs_integration_bridge_opt = cfg.StrOpt('ovs_integration_bridge', default='xapi1', #Deprecated in Icehouse deprecated_name='xenapi_ovs_integration_bridge', deprecated_group='DEFAULT', help='Name of Integration Bridge used by Open vSwitch') CONF = cfg.CONF CONF.register_opt(xenapi_ovs_integration_bridge_opt, 'xenserver') class XenVIFDriver(object): def __init__(self, xenapi_session): self._session = xenapi_session class XenAPIBridgeDriver(XenVIFDriver): """VIF Driver for XenAPI that uses XenAPI to create Networks.""" def plug(self, instance, vif, vm_ref=None, device=None): if not vm_ref: vm_ref = vm_utils.lookup(self._session, instance['name']) if not device: device = 0 if vif['network'].get_meta('should_create_vlan'): network_ref = self._ensure_vlan_bridge(vif['network']) else: network_ref = network_utils.find_network_with_bridge( self._session, vif['network']['bridge']) vif_rec = {} vif_rec['device'] = str(device) vif_rec['network'] = network_ref vif_rec['VM'] = vm_ref vif_rec['MAC'] = vif['address'] vif_rec['MTU'] = '1500' vif_rec['other_config'] = {} if vif.get_meta('rxtx_cap'): vif_rec['qos_algorithm_type'] = 'ratelimit' vif_rec['qos_algorithm_params'] = {'kbps': str(int(vif.get_meta('rxtx_cap')) * 1024)} else: vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} return vif_rec def _ensure_vlan_bridge(self, network): """Ensure that a VLAN bridge exists.""" vlan_num = network.get_meta('vlan') bridge = network['bridge'] bridge_interface = (CONF.vlan_interface or network.get_meta('bridge_interface')) # Check whether bridge already exists # Retrieve network whose name_label is "bridge" network_ref = network_utils.find_network_with_name_label( self._session, bridge) if network_ref is None: # If bridge does not exists # 1 - create network description = 'network for nova bridge %s' % bridge network_rec = {'name_label': bridge, 'name_description': description, 'other_config': {}} network_ref = self._session.call_xenapi('network.create', network_rec) # 2 - find PIF for VLAN NOTE(salvatore-orlando): using double # quotes inside single quotes as xapi filter only support # tokens in double quotes expr = ('field "device" = "%s" and field "VLAN" = "-1"' % bridge_interface) pifs = self._session.call_xenapi('PIF.get_all_records_where', expr) pif_ref = None # Multiple PIF are ok: we are dealing with a pool if len(pifs) == 0: raise Exception(_('Found no PIF for device %s') % bridge_interface) for pif_ref in pifs.keys(): self._session.call_xenapi('VLAN.create', pif_ref, str(vlan_num), network_ref) else: # Check VLAN tag is appropriate network_rec = self._session.call_xenapi('network.get_record', network_ref) # Retrieve PIFs from network for pif_ref in network_rec['PIFs']: # Retrieve VLAN from PIF pif_rec = self._session.call_xenapi('PIF.get_record', pif_ref) pif_vlan = int(pif_rec['VLAN']) # Raise an exception if VLAN != vlan_num if pif_vlan != vlan_num: raise Exception(_("PIF %(pif_uuid)s for network " "%(bridge)s has VLAN id %(pif_vlan)d. 
" "Expected %(vlan_num)d"), {'pif_uuid': pif_rec['uuid'], 'bridge': bridge, 'pif_vlan': pif_vlan, 'vlan_num': vlan_num}) return network_ref def unplug(self, instance, vif): pass class XenAPIOpenVswitchDriver(XenVIFDriver): """VIF driver for Open vSwitch with XenAPI.""" def plug(self, instance, vif, vm_ref=None, device=None): if not vm_ref: vm_ref = vm_utils.lookup(self._session, instance['name']) if not device: device = 0 # with OVS model, always plug into an OVS integration bridge # that is already created network_ref = network_utils.find_network_with_bridge( self._session, CONF.xenserver.ovs_integration_bridge) vif_rec = {} vif_rec['device'] = str(device) vif_rec['network'] = network_ref vif_rec['VM'] = vm_ref vif_rec['MAC'] = vif['address'] vif_rec['MTU'] = '1500' vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} # OVS on the hypervisor monitors this key and uses it to # set the iface-id attribute vif_rec['other_config'] = {'nicira-iface-id': vif['id']} return vif_rec def unplug(self, instance, vif): pass nova-2014.1.5/nova/virt/xenapi/firewall.py0000664000567000056700000001116212540642544021450 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import context from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.virt import firewall from nova.virt import netutils LOG = logging.getLogger(__name__) class Dom0IptablesFirewallDriver(firewall.IptablesFirewallDriver): """Dom0IptablesFirewallDriver class This class provides an implementation for nova.virt.Firewall using iptables. This class is meant to be used with the xenapi backend and uses xenapi plugin to enforce iptables rules in dom0. """ def _plugin_execute(self, *cmd, **kwargs): # Prepare arguments for plugin call args = {} args.update(map(lambda x: (x, str(kwargs[x])), kwargs)) args['cmd_args'] = jsonutils.dumps(cmd) ret = self._session.call_plugin('xenhost', 'iptables_config', args) json_ret = jsonutils.loads(ret) return (json_ret['out'], json_ret['err']) def __init__(self, virtapi, xenapi_session=None, **kwargs): from nova.network import linux_net super(Dom0IptablesFirewallDriver, self).__init__(virtapi, **kwargs) self._session = xenapi_session # Create IpTablesManager with executor through plugin self.iptables = linux_net.IptablesManager(self._plugin_execute) self.iptables.ipv4['filter'].add_chain('sg-fallback') self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') def _build_tcp_udp_rule(self, rule, version): if rule['from_port'] == rule['to_port']: return ['--dport', '%s' % (rule['from_port'],)] else: # No multiport needed for XS! 
return ['--dport', '%s:%s' % (rule['from_port'], rule['to_port'])] def _provider_rules(self): """Generate a list of rules from provider for IP4 & IP6. Note: We could not use the common code from virt.firewall because XS doesn't accept the '-m multiport' option. """ ctxt = context.get_admin_context() ipv4_rules = [] ipv6_rules = [] rules = self._virtapi.provider_fw_rule_get_all(ctxt) for rule in rules: LOG.debug(_('Adding provider rule: %s'), rule['cidr']) version = netutils.get_ip_version(rule['cidr']) if version == 4: fw_rules = ipv4_rules else: fw_rules = ipv6_rules protocol = rule['protocol'] if version == 6 and protocol == 'icmp': protocol = 'icmpv6' args = ['-p', protocol, '-s', rule['cidr']] if protocol in ['udp', 'tcp']: if rule['from_port'] == rule['to_port']: args += ['--dport', '%s' % (rule['from_port'],)] else: args += ['--dport', '%s:%s' % (rule['from_port'], rule['to_port'])] elif protocol == 'icmp': icmp_type = rule['from_port'] icmp_code = rule['to_port'] if icmp_type == -1: icmp_type_arg = None else: icmp_type_arg = '%s' % icmp_type if not icmp_code == -1: icmp_type_arg += '/%s' % icmp_code if icmp_type_arg: if version == 4: args += ['-m', 'icmp', '--icmp-type', icmp_type_arg] elif version == 6: args += ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg] args += ['-j DROP'] fw_rules += [' '.join(args)] return ipv4_rules, ipv6_rules nova-2014.1.5/nova/virt/xenapi/pool.py0000664000567000056700000002731112540642544020617 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Citrix Systems, Inc. # Copyright 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for Pool-related functions (join, eject, etc). 
""" from oslo.config import cfg import six.moves.urllib.parse as urlparse from nova.compute import rpcapi as compute_rpcapi from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.virt.xenapi import pool_states from nova.virt.xenapi import vm_utils LOG = logging.getLogger(__name__) xenapi_pool_opts = [ cfg.BoolOpt('use_join_force', #Deprecated in Icehouse deprecated_name='use_join_force', deprecated_group='DEFAULT', default=True, help='To use for hosts with different CPUs'), ] CONF = cfg.CONF CONF.register_opts(xenapi_pool_opts, 'xenserver') CONF.import_opt('host', 'nova.netconf') class ResourcePool(object): """Implements resource pool operations.""" def __init__(self, session, virtapi): host_rec = session.host.get_record(session.host_ref) self._host_name = host_rec['hostname'] self._host_addr = host_rec['address'] self._host_uuid = host_rec['uuid'] self._session = session self._virtapi = virtapi self.compute_rpcapi = compute_rpcapi.ComputeAPI() def undo_aggregate_operation(self, context, op, aggregate, host, set_error): """Undo aggregate operation when pool error raised.""" try: if set_error: metadata = {pool_states.KEY: pool_states.ERROR} aggregate.update_metadata(metadata) op(context, aggregate, host) except Exception: LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state ' 'during operation on %(host)s'), {'aggregate_id': aggregate['id'], 'host': host}) def add_to_aggregate(self, context, aggregate, host, slave_info=None): """Add a compute host to an aggregate.""" if not pool_states.is_hv_pool(aggregate['metadata']): return invalid = {pool_states.CHANGING: 'setup in progress', pool_states.DISMISSED: 'aggregate deleted', pool_states.ERROR: 'aggregate in error'} if (aggregate['metadata'][pool_states.KEY] in invalid.keys()): raise exception.InvalidAggregateAction( action='add host', aggregate_id=aggregate['id'], reason=aggregate['metadata'][pool_states.KEY]) if (aggregate['metadata'][pool_states.KEY] == pool_states.CREATED): aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING}) if len(aggregate['hosts']) == 1: # this is the first host of the pool -> make it master self._init_pool(aggregate['id'], aggregate['name']) # save metadata so that we can find the master again metadata = {'master_compute': host, host: self._host_uuid, pool_states.KEY: pool_states.ACTIVE} aggregate.update_metadata(metadata) else: # the pool is already up and running, we need to figure out # whether we can serve the request from this host or not. master_compute = aggregate['metadata']['master_compute'] if master_compute == CONF.host and master_compute != host: # this is the master -> do a pool-join # To this aim, nova compute on the slave has to go down. # NOTE: it is assumed that ONLY nova compute is running now self._join_slave(aggregate['id'], host, slave_info.get('compute_uuid'), slave_info.get('url'), slave_info.get('user'), slave_info.get('passwd')) metadata = {host: slave_info.get('xenhost_uuid'), } aggregate.update_metadata(metadata) elif master_compute and master_compute != host: # send rpc cast to master, asking to add the following # host with specified credentials. 
slave_info = self._create_slave_info() self.compute_rpcapi.add_aggregate_host( context, aggregate, host, master_compute, slave_info) def remove_from_aggregate(self, context, aggregate, host, slave_info=None): """Remove a compute host from an aggregate.""" slave_info = slave_info or dict() if not pool_states.is_hv_pool(aggregate['metadata']): return invalid = {pool_states.CREATED: 'no hosts to remove', pool_states.CHANGING: 'setup in progress', pool_states.DISMISSED: 'aggregate deleted', } if aggregate['metadata'][pool_states.KEY] in invalid.keys(): raise exception.InvalidAggregateAction( action='remove host', aggregate_id=aggregate['id'], reason=invalid[aggregate['metadata'][pool_states.KEY]]) master_compute = aggregate['metadata']['master_compute'] if master_compute == CONF.host and master_compute != host: # this is the master -> instruct it to eject a host from the pool host_uuid = aggregate['metadata'][host] self._eject_slave(aggregate['id'], slave_info.get('compute_uuid'), host_uuid) aggregate.update_metadata({host: None}) elif master_compute == host: # Remove master from its own pool -> destroy pool only if the # master is on its own, otherwise raise fault. Destroying a # pool made only by master is fictional if len(aggregate['hosts']) > 1: # NOTE: this could be avoided by doing a master # re-election, but this is simpler for now. raise exception.InvalidAggregateAction( aggregate_id=aggregate['id'], action='remove_from_aggregate', reason=_('Unable to eject %s ' 'from the pool; pool not empty') % host) self._clear_pool(aggregate['id']) aggregate.update_metadata({'master_compute': None, host: None}) elif master_compute and master_compute != host: # A master exists -> forward pool-eject request to master slave_info = self._create_slave_info() self.compute_rpcapi.remove_aggregate_host( context, aggregate['id'], host, master_compute, slave_info) else: # this shouldn't have happened raise exception.AggregateError(aggregate_id=aggregate['id'], action='remove_from_aggregate', reason=_('Unable to eject %s ' 'from the pool; No master found') % host) def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): """Joins a slave into a XenServer resource pool.""" try: args = {'compute_uuid': compute_uuid, 'url': url, 'user': user, 'password': passwd, 'force': jsonutils.dumps(CONF.xenserver.use_join_force), 'master_addr': self._host_addr, 'master_user': CONF.xenserver.connection_username, 'master_pass': CONF.xenserver.connection_password, } self._session.call_plugin('xenhost', 'host_join', args) except self._session.XenAPI.Failure as e: LOG.error(_("Pool-Join failed: %s"), e) raise exception.AggregateError(aggregate_id=aggregate_id, action='add_to_aggregate', reason=_('Unable to join %s ' 'in the pool') % host) def _eject_slave(self, aggregate_id, compute_uuid, host_uuid): """Eject a slave from a XenServer resource pool.""" try: # shutdown nova-compute; if there are other VMs running, e.g. # guest instances, the eject will fail. That's a precaution # to deal with the fact that the admin should evacuate the host # first. The eject wipes out the host completely. 
vm_ref = self._session.VM.get_by_uuid(compute_uuid) self._session.VM.clean_shutdown(vm_ref) host_ref = self._session.host.get_by_uuid(host_uuid) self._session.pool.eject(host_ref) except self._session.XenAPI.Failure as e: LOG.error(_("Pool-eject failed: %s"), e) raise exception.AggregateError(aggregate_id=aggregate_id, action='remove_from_aggregate', reason=str(e.details)) def _init_pool(self, aggregate_id, aggregate_name): """Set the name label of a XenServer pool.""" try: pool_ref = self._session.pool.get_all()[0] self._session.pool.set_name_label(pool_ref, aggregate_name) except self._session.XenAPI.Failure as e: LOG.error(_("Unable to set up pool: %s."), e) raise exception.AggregateError(aggregate_id=aggregate_id, action='add_to_aggregate', reason=str(e.details)) def _clear_pool(self, aggregate_id): """Clear the name label of a XenServer pool.""" try: pool_ref = self._session.pool.get_all()[0] self._session.pool.set_name_label(pool_ref, '') except self._session.XenAPI.Failure as e: LOG.error(_("Pool-set_name_label failed: %s"), e) raise exception.AggregateError(aggregate_id=aggregate_id, action='remove_from_aggregate', reason=str(e.details)) def _create_slave_info(self): """XenServer specific info needed to join the hypervisor pool.""" # replace the address from the xenapi connection url # because this might be 169.254.0.1, i.e. xenapi # NOTE: password in clear is not great, but it'll do for now sender_url = swap_xapi_host( CONF.xenserver.connection_url, self._host_addr) return { "url": sender_url, "user": CONF.xenserver.connection_username, "passwd": CONF.xenserver.connection_password, "compute_uuid": vm_utils.get_this_vm_uuid(None), "xenhost_uuid": self._host_uuid, } def swap_xapi_host(url, host_addr): """Replace the XenServer address present in 'url' with 'host_addr'.""" temp_url = urlparse.urlparse(url) _netloc, sep, port = temp_url.netloc.partition(':') return url.replace(temp_url.netloc, '%s%s%s' % (host_addr, sep, port)) nova-2014.1.5/nova/virt/libvirt/0000775000567000056700000000000012540643452017456 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/libvirt/volume.py0000664000567000056700000013557712540642544021362 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Volume drivers for libvirt.""" import glob import os import time import urllib2 from oslo.config import cfg import six import six.moves.urllib.parse as urlparse from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common.gettextutils import _LW from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import processutils from nova import paths from nova.storage import linuxscsi from nova import utils from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import utils as virtutils LOG = logging.getLogger(__name__) volume_opts = [ cfg.IntOpt('num_iscsi_scan_tries', default=5, help='Number of times to rescan iSCSI target to find volume', deprecated_group='DEFAULT'), cfg.IntOpt('num_iser_scan_tries', default=5, help='Number of times to rescan iSER target to find volume', deprecated_group='DEFAULT'), cfg.StrOpt('rbd_user', help='The RADOS client name for accessing rbd volumes', deprecated_group='DEFAULT'), cfg.StrOpt('rbd_secret_uuid', help='The libvirt UUID of the secret for the rbd_user' 'volumes', deprecated_group='DEFAULT'), cfg.StrOpt('nfs_mount_point_base', default=paths.state_path_def('mnt'), help='Directory where the NFS volume is mounted on the' ' compute node', deprecated_group='DEFAULT'), cfg.StrOpt('nfs_mount_options', help='Mount options passedf to the NFS client. See section ' 'of the nfs man page for details', deprecated_group='DEFAULT'), cfg.IntOpt('num_aoe_discover_tries', default=3, help='Number of times to rediscover AoE target to find volume', deprecated_group='DEFAULT'), cfg.StrOpt('glusterfs_mount_point_base', default=paths.state_path_def('mnt'), help='Directory where the glusterfs volume is mounted on the ' 'compute node', deprecated_group='DEFAULT'), cfg.BoolOpt('iscsi_use_multipath', default=False, help='Use multipath connection of the iSCSI volume', deprecated_group='DEFAULT', deprecated_name='libvirt_iscsi_use_multipath'), cfg.BoolOpt('iser_use_multipath', default=False, help='Use multipath connection of the iSER volume', deprecated_group='DEFAULT', deprecated_name='libvirt_iser_use_multipath'), cfg.StrOpt('scality_sofs_config', help='Path or URL to Scality SOFS configuration file', deprecated_group='DEFAULT'), cfg.StrOpt('scality_sofs_mount_point', default='$state_path/scality', help='Base dir where Scality SOFS shall be mounted', deprecated_group='DEFAULT'), cfg.ListOpt('qemu_allowed_storage_drivers', default=[], help='Protocols listed here will be accessed directly ' 'from QEMU. Currently supported protocols: [gluster]', deprecated_group='DEFAULT') ] CONF = cfg.CONF CONF.register_opts(volume_opts, 'libvirt') class LibvirtBaseVolumeDriver(object): """Base class for volume drivers.""" def __init__(self, connection, is_block_dev): self.connection = connection self.is_block_dev = is_block_dev def connect_volume(self, connection_info, disk_info): """Connect the volume. 
Returns xml for libvirt.""" conf = vconfig.LibvirtConfigGuestDisk() conf.driver_name = virtutils.pick_disk_driver_name( self.connection.get_hypervisor_version(), self.is_block_dev ) conf.source_device = disk_info['type'] conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = disk_info['dev'] conf.target_bus = disk_info['bus'] conf.serial = connection_info.get('serial') # Support for block size tuning data = {} if 'data' in connection_info: data = connection_info['data'] if 'logical_block_size' in data: conf.logical_block_size = data['logical_block_size'] if 'physical_block_size' in data: conf.physical_block_size = data['physical_block_size'] # Extract rate_limit control parameters if 'qos_specs' in data and data['qos_specs']: tune_opts = ['total_bytes_sec', 'read_bytes_sec', 'write_bytes_sec', 'total_iops_sec', 'read_iops_sec', 'write_iops_sec'] specs = data['qos_specs'] if isinstance(specs, dict): for k, v in specs.iteritems(): if k in tune_opts: new_key = 'disk_' + k setattr(conf, new_key, v) else: LOG.warn(_('Unknown content in connection_info/' 'qos_specs: %s') % specs) # Extract access_mode control parameters if 'access_mode' in data and data['access_mode']: access_mode = data['access_mode'] if access_mode in ('ro', 'rw'): conf.readonly = access_mode == 'ro' else: msg = (_('Unknown content in connection_info/access_mode: %s') % access_mode) LOG.error(msg) raise exception.InvalidVolumeAccessMode( access_mode=access_mode) return conf def disconnect_volume(self, connection_info, disk_dev): """Disconnect the volume.""" pass class LibvirtVolumeDriver(LibvirtBaseVolumeDriver): """Class for volumes backed by local file.""" def __init__(self, connection): super(LibvirtVolumeDriver, self).__init__(connection, is_block_dev=True) def connect_volume(self, connection_info, disk_info): """Connect the volume to a local device.""" conf = super(LibvirtVolumeDriver, self).connect_volume(connection_info, disk_info) conf.source_type = "block" conf.source_path = connection_info['data']['device_path'] return conf class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver): """Driver to attach fake volumes to libvirt.""" def __init__(self, connection): super(LibvirtFakeVolumeDriver, self).__init__(connection, is_block_dev=True) def connect_volume(self, connection_info, disk_info): """Connect the volume to a fake device.""" conf = super(LibvirtFakeVolumeDriver, self).connect_volume(connection_info, disk_info) conf.source_type = "network" conf.source_protocol = "fake" conf.source_name = "fake" return conf class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver): """Driver to attach Network volumes to libvirt.""" def __init__(self, connection): super(LibvirtNetVolumeDriver, self).__init__(connection, is_block_dev=False) def connect_volume(self, connection_info, disk_info): conf = super(LibvirtNetVolumeDriver, self).connect_volume(connection_info, disk_info) netdisk_properties = connection_info['data'] conf.source_type = "network" conf.source_protocol = connection_info['driver_volume_type'] conf.source_name = netdisk_properties.get('name') conf.source_hosts = netdisk_properties.get('hosts', []) conf.source_ports = netdisk_properties.get('ports', []) auth_enabled = netdisk_properties.get('auth_enabled') if (conf.source_protocol == 'rbd' and CONF.libvirt.rbd_secret_uuid): conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid auth_enabled = True # Force authentication locally if CONF.libvirt.rbd_user: conf.auth_username = CONF.libvirt.rbd_user if auth_enabled: conf.auth_username = (conf.auth_username or 
netdisk_properties['auth_username']) conf.auth_secret_type = netdisk_properties['secret_type'] conf.auth_secret_uuid = (conf.auth_secret_uuid or netdisk_properties['secret_uuid']) return conf class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver): """Driver to attach Network volumes to libvirt.""" def __init__(self, connection): super(LibvirtISCSIVolumeDriver, self).__init__(connection, is_block_dev=True) self.num_scan_tries = CONF.libvirt.num_iscsi_scan_tries self.use_multipath = CONF.libvirt.iscsi_use_multipath def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) (out, err) = utils.execute('iscsiadm', '-m', 'node', '-T', iscsi_properties['target_iqn'], '-p', iscsi_properties['target_portal'], *iscsi_command, run_as_root=True, check_exit_code=check_exit_code) LOG.debug(_("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s"), {'command': iscsi_command, 'out': out, 'err': err}) return (out, err) def _iscsiadm_update(self, iscsi_properties, property_key, property_value, **kwargs): iscsi_command = ('--op', 'update', '-n', property_key, '-v', property_value) return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs) def _get_target_portals_from_iscsiadm_output(self, output): # return both portals and iqns return [line.split() for line in output.splitlines()] @utils.synchronized('connect_volume') def connect_volume(self, connection_info, disk_info): """Attach the volume to instance_name.""" conf = super(LibvirtISCSIVolumeDriver, self).connect_volume(connection_info, disk_info) iscsi_properties = connection_info['data'] if self.use_multipath: #multipath installed, discovering other targets if available #multipath should be configured on the nova-compute node, #in order to fit storage vendor out = self._run_iscsiadm_bare(['-m', 'discovery', '-t', 'sendtargets', '-p', iscsi_properties['target_portal']], check_exit_code=[0, 255])[0] \ or "" # There are two types of iSCSI multipath devices. One which shares # the same iqn between multiple portals, and the other which use # different iqns on different portals. Try to identify the type by # checking the iscsiadm output if the iqn is used by multiple # portals. If it is, it's the former, so use the supplied iqn. # Otherwise, it's the latter, so try the ip,iqn combinations to # find the targets which constitutes the multipath device. ips_iqns = self._get_target_portals_from_iscsiadm_output(out) same_portal = False all_portals = set() match_portals = set() for ip, iqn in ips_iqns: all_portals.add(ip) if iqn == iscsi_properties['target_iqn']: match_portals.add(ip) if len(all_portals) == len(match_portals): same_portal = True for ip, iqn in ips_iqns: props = iscsi_properties.copy() props['target_portal'] = ip.split(",")[0] if not same_portal: props['target_iqn'] = iqn self._connect_to_iscsi_portal(props) self._rescan_iscsi() else: self._connect_to_iscsi_portal(iscsi_properties) # Detect new/resized LUNs for existing sessions self._run_iscsiadm(iscsi_properties, ("--rescan",)) host_device = self._get_host_device(iscsi_properties) # The /dev/disk/by-path/... node is not always present immediately # TODO(justinsb): This retry-with-delay is a pattern, move to utils? tries = 0 disk_dev = disk_info['dev'] while not os.path.exists(host_device): if tries >= self.num_scan_tries: raise exception.NovaException(_("iSCSI device not found at %s") % (host_device)) LOG.warn(_("ISCSI volume not yet found at: %(disk_dev)s. " "Will rescan & retry. 
Try number: %(tries)s"), {'disk_dev': disk_dev, 'tries': tries}) # The rescan isn't documented as being necessary(?), but it helps self._run_iscsiadm(iscsi_properties, ("--rescan",)) tries = tries + 1 if not os.path.exists(host_device): time.sleep(tries ** 2) if tries != 0: LOG.debug(_("Found iSCSI node %(disk_dev)s " "(after %(tries)s rescans)"), {'disk_dev': disk_dev, 'tries': tries}) if self.use_multipath: #we use the multipath device instead of the single path device self._rescan_multipath() multipath_device = self._get_multipath_device_name(host_device) if multipath_device is not None: host_device = multipath_device conf.source_type = "block" conf.source_path = host_device return conf @utils.synchronized('connect_volume') def disconnect_volume(self, connection_info, disk_dev): """Detach the volume from instance_name.""" iscsi_properties = connection_info['data'] host_device = self._get_host_device(iscsi_properties) multipath_device = None if self.use_multipath: multipath_device = self._get_multipath_device_name(host_device) super(LibvirtISCSIVolumeDriver, self).disconnect_volume(connection_info, disk_dev) if self.use_multipath and multipath_device: return self._disconnect_volume_multipath_iscsi(iscsi_properties, multipath_device) # NOTE(vish): Only disconnect from the target if no luns from the # target are in use. device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" % (iscsi_properties['target_portal'], iscsi_properties['target_iqn'])) devices = self.connection.get_all_block_devices() devices = [dev for dev in devices if dev.startswith(device_prefix)] if not devices: self._disconnect_from_iscsi_portal(iscsi_properties) elif host_device not in devices: # Delete device if LUN is not in use by another instance self._delete_device(host_device) def _delete_device(self, device_path): device_name = os.path.basename(os.path.realpath(device_path)) delete_control = '/sys/block/' + device_name + '/device/delete' if os.path.exists(delete_control): # Copy '1' from stdin to the device delete control file utils.execute('cp', '/dev/stdin', delete_control, process_input='1', run_as_root=True) else: LOG.warn(_("Unable to delete volume device %s"), device_name) def _remove_multipath_device_descriptor(self, disk_descriptor): disk_descriptor = disk_descriptor.replace('/dev/mapper/', '') try: self._run_multipath(['-f', disk_descriptor], check_exit_code=[0, 1]) except exception.ProcessExecutionError as exc: # Because not all cinder drivers need to remove the dev mapper, # here just logs a warning to avoid affecting those drivers in # exceptional cases. LOG.warn(_('Failed to remove multipath device descriptor ' '%(dev_mapper)s. Exception message: %(msg)s') % {'dev_mapper': disk_descriptor, 'msg': exc.message}) def _disconnect_volume_multipath_iscsi(self, iscsi_properties, multipath_device): self._rescan_iscsi() self._rescan_multipath() block_devices = self.connection.get_all_block_devices() devices = [] for dev in block_devices: if "/mapper/" in dev: devices.append(dev) else: mpdev = self._get_multipath_device_name(dev) if mpdev: devices.append(mpdev) # Do a discovery to find all targets. # Targets for multiple paths for the same multipath device # may not be the same. out = self._run_iscsiadm_bare(['-m', 'discovery', '-t', 'sendtargets', '-p', iscsi_properties['target_portal']], check_exit_code=[0, 255])[0] \ or "" # Extract targets for the current multipath device. 
ips_iqns = [] entries = self._get_iscsi_devices() for ip, iqn in self._get_target_portals_from_iscsiadm_output(out): ip_iqn = "%s-iscsi-%s" % (ip.split(",")[0], iqn) for entry in entries: entry_ip_iqn = entry.split("-lun-")[0] if entry_ip_iqn[:3] == "ip-": entry_ip_iqn = entry_ip_iqn[3:] if (ip_iqn != entry_ip_iqn): continue entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry) entry_mpdev = self._get_multipath_device_name(entry_real_path) if entry_mpdev == multipath_device: ips_iqns.append([ip, iqn]) break if not devices: # disconnect if no other multipath devices self._disconnect_mpath(iscsi_properties, ips_iqns) return # Get a target for all other multipath devices other_iqns = [self._get_multipath_iqn(device) for device in devices] # Get all the targets for the current multipath device current_iqns = [iqn for ip, iqn in ips_iqns] in_use = False for current in current_iqns: if current in other_iqns: in_use = True break # If no other multipath device attached has the same iqn # as the current device if not in_use: # disconnect if no other multipath devices with same iqn self._disconnect_mpath(iscsi_properties, ips_iqns) return elif multipath_device not in devices: # delete the devices associated w/ the unused multipath self._delete_mpath(iscsi_properties, multipath_device, ips_iqns) # else do not disconnect iscsi portals, # as they are used for other luns, # just remove multipath mapping device descriptor self._remove_multipath_device_descriptor(multipath_device) return def _connect_to_iscsi_portal(self, iscsi_properties): # NOTE(vish): If we are on the same host as nova volume, the # discovery makes the target so we don't need to # run --op new. Therefore, we check to see if the # target exists, and if we get 255 (Not Found), then # we run --op new. This will also happen if another # volume is using the same target. try: self._run_iscsiadm(iscsi_properties, ()) except processutils.ProcessExecutionError as exc: # iscsiadm returns 21 for "No records found" after version 2.0-871 if exc.exit_code in [21, 255]: self._reconnect(iscsi_properties) else: raise if iscsi_properties.get('auth_method'): self._iscsiadm_update(iscsi_properties, "node.session.auth.authmethod", iscsi_properties['auth_method']) self._iscsiadm_update(iscsi_properties, "node.session.auth.username", iscsi_properties['auth_username']) self._iscsiadm_update(iscsi_properties, "node.session.auth.password", iscsi_properties['auth_password']) #duplicate logins crash iscsiadm after load, #so we scan active sessions to see if the node is logged in. 
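# Each 'iscsiadm -m session' line looks like (hypothetical target):
#   tcp: [1] 10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-1
# so after split(" "), field 2 is the portal and field 3 the iqn,
# which is what the parsing below relies on.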
out = self._run_iscsiadm_bare(["-m", "session"], run_as_root=True, check_exit_code=[0, 1, 21])[0] or "" portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]} for p in out.splitlines() if p.startswith("tcp:")] stripped_portal = iscsi_properties['target_portal'].split(",")[0] if len(portals) == 0 or len([s for s in portals if stripped_portal == s['portal'].split(",")[0] and s['iqn'] == iscsi_properties['target_iqn']] ) == 0: try: self._run_iscsiadm(iscsi_properties, ("--login",), check_exit_code=[0, 255]) except processutils.ProcessExecutionError as err: #as this might be one of many paths, #only set successful logins to startup automatically if err.exit_code in [15]: self._iscsiadm_update(iscsi_properties, "node.startup", "automatic") return self._iscsiadm_update(iscsi_properties, "node.startup", "automatic") def _disconnect_from_iscsi_portal(self, iscsi_properties): self._iscsiadm_update(iscsi_properties, "node.startup", "manual", check_exit_code=[0, 21, 255]) self._run_iscsiadm(iscsi_properties, ("--logout",), check_exit_code=[0, 21, 255]) self._run_iscsiadm(iscsi_properties, ('--op', 'delete'), check_exit_code=[0, 21, 255]) def _get_multipath_device_name(self, single_path_device): device = os.path.realpath(single_path_device) out = self._run_multipath(['-ll', device], check_exit_code=[0, 1])[0] mpath_line = [line for line in out.splitlines() if "scsi_id" not in line] # ignore udev errors if len(mpath_line) > 0 and len(mpath_line[0]) > 0: return "/dev/mapper/%s" % mpath_line[0].split(" ")[0] return None def _get_iscsi_devices(self): try: devices = list(os.walk('/dev/disk/by-path'))[0][-1] except IndexError: return [] return [entry for entry in devices if entry.startswith("ip-")] def _delete_mpath(self, iscsi_properties, multipath_device, ips_iqns): entries = self._get_iscsi_devices() # Loop through ips_iqns to construct all paths iqn_luns = [] for ip, iqn in ips_iqns: iqn_lun = '%s-lun-%s' % (iqn, iscsi_properties.get('target_lun', 0)) iqn_luns.append(iqn_lun) for dev in ['/dev/disk/by-path/%s' % dev for dev in entries]: for iqn_lun in iqn_luns: if iqn_lun in dev: self._delete_device(dev) self._rescan_multipath() def _disconnect_mpath(self, iscsi_properties, ips_iqns): for ip, iqn in ips_iqns: props = iscsi_properties.copy() props['target_portal'] = ip props['target_iqn'] = iqn self._disconnect_from_iscsi_portal(props) self._rescan_multipath() def _get_multipath_iqn(self, multipath_device): entries = self._get_iscsi_devices() for entry in entries: entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry) entry_multipath = self._get_multipath_device_name(entry_real_path) if entry_multipath == multipath_device: return entry.split("iscsi-")[1].split("-lun")[0] return None def _run_iscsiadm_bare(self, iscsi_command, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) (out, err) = utils.execute('iscsiadm', *iscsi_command, run_as_root=True, check_exit_code=check_exit_code) LOG.debug(_("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s"), {'command': iscsi_command, 'out': out, 'err': err}) return (out, err) def _run_multipath(self, multipath_command, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) (out, err) = utils.execute('multipath', *multipath_command, run_as_root=True, check_exit_code=check_exit_code) LOG.debug(_("multipath %(command)s: stdout=%(out)s stderr=%(err)s"), {'command': multipath_command, 'out': out, 'err': err}) return (out, err) def _rescan_iscsi(self): self._run_iscsiadm_bare(('-m', 'node', '--rescan'), check_exit_code=[0, 1, 21, 
255]) self._run_iscsiadm_bare(('-m', 'session', '--rescan'), check_exit_code=[0, 1, 21, 255]) def _rescan_multipath(self): self._run_multipath(['-r'], check_exit_code=[0, 1, 21]) def _get_host_device(self, iscsi_properties): return ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" % (iscsi_properties['target_portal'], iscsi_properties['target_iqn'], iscsi_properties.get('target_lun', 0))) def _reconnect(self, iscsi_properties): self._run_iscsiadm(iscsi_properties, ('--op', 'new')) class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver): """Driver to attach Network volumes to libvirt.""" def __init__(self, connection): super(LibvirtISERVolumeDriver, self).__init__(connection) self.num_scan_tries = CONF.libvirt.num_iser_scan_tries self.use_multipath = CONF.libvirt.iser_use_multipath def _get_multipath_iqn(self, multipath_device): entries = self._get_iscsi_devices() for entry in entries: entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry) entry_multipath = self._get_multipath_device_name(entry_real_path) if entry_multipath == multipath_device: return entry.split("iser-")[1].split("-lun")[0] return None def _get_host_device(self, iser_properties): time.sleep(1) host_device = None device = ("ip-%s-iscsi-%s-lun-%s" % (iser_properties['target_portal'], iser_properties['target_iqn'], iser_properties.get('target_lun', 0))) look_for_device = glob.glob('/dev/disk/by-path/*%s' % device) if look_for_device: host_device = look_for_device[0] return host_device def _reconnect(self, iser_properties): self._run_iscsiadm(iser_properties, ('--interface', 'iser', '--op', 'new')) class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver): """Class implements libvirt part of volume driver for NFS.""" def __init__(self, connection): """Create back-end to nfs.""" super(LibvirtNFSVolumeDriver, self).__init__(connection, is_block_dev=False) def connect_volume(self, connection_info, disk_info): """Connect the volume. Returns xml for libvirt.""" conf = super(LibvirtNFSVolumeDriver, self).connect_volume(connection_info, disk_info) options = connection_info['data'].get('options') path = self._ensure_mounted(connection_info['data']['export'], options) path = os.path.join(path, connection_info['data']['name']) conf.source_type = 'file' conf.source_path = path conf.driver_format = connection_info['data'].get('format', 'raw') return conf def disconnect_volume(self, connection_info, disk_dev): """Disconnect the volume.""" export = connection_info['data']['export'] mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base, utils.get_hash_str(export)) try: utils.execute('umount', mount_path, run_as_root=True) except processutils.ProcessExecutionError as exc: if 'target is busy' in exc.message: LOG.debug(_("The NFS share %s is still in use."), export) else: LOG.exception(_("Couldn't unmount the NFS share %s"), export) def _ensure_mounted(self, nfs_export, options=None): """@type nfs_export: string @type options: string """ mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base, utils.get_hash_str(nfs_export)) if not virtutils.is_mounted(mount_path, nfs_export): self._mount_nfs(mount_path, nfs_export, options, ensure=True) return mount_path def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False): """Mount nfs export to mount path.""" utils.execute('mkdir', '-p', mount_path) # Construct the NFS mount command. 
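# e.g. with nfs_mount_options='vers=3' and options='-o intr' (both
# hypothetical) the resulting command is:
#   mount -t nfs -o vers=3 -o intr <nfs_share> <mount_path>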
nfs_cmd = ['mount', '-t', 'nfs'] if CONF.libvirt.nfs_mount_options is not None: nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options]) if options is not None: nfs_cmd.extend(options.split(' ')) nfs_cmd.extend([nfs_share, mount_path]) try: utils.execute(*nfs_cmd, run_as_root=True) except processutils.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.message: LOG.warn(_("%s is already mounted"), nfs_share) else: raise class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver): """Driver to attach AoE volumes to libvirt.""" def __init__(self, connection): super(LibvirtAOEVolumeDriver, self).__init__(connection, is_block_dev=True) def _aoe_discover(self): """Call aoe-discover (aoe-tools) AoE Discover.""" (out, err) = utils.execute('aoe-discover', run_as_root=True, check_exit_code=0) return (out, err) def _aoe_revalidate(self, aoedev): """Revalidate the LUN Geometry (When an AoE ID is reused).""" (out, err) = utils.execute('aoe-revalidate', aoedev, run_as_root=True, check_exit_code=0) return (out, err) def connect_volume(self, connection_info, mount_device): shelf = connection_info['data']['target_shelf'] lun = connection_info['data']['target_lun'] aoedev = 'e%s.%s' % (shelf, lun) aoedevpath = '/dev/etherd/%s' % (aoedev) if os.path.exists(aoedevpath): # NOTE(jbr_): If aoedevpath already exists, revalidate the LUN. self._aoe_revalidate(aoedev) else: # NOTE(jbr_): If aoedevpath does not exist, do a discover. self._aoe_discover() #NOTE(jbr_): Device path is not always present immediately def _wait_for_device_discovery(aoedevpath, mount_device): tries = self.tries if os.path.exists(aoedevpath): raise loopingcall.LoopingCallDone() if self.tries >= CONF.libvirt.num_aoe_discover_tries: raise exception.NovaException(_("AoE device not found at %s") % (aoedevpath)) LOG.warn(_("AoE volume not yet found at: %(aoedevpath)s. " "Try number: %(tries)s"), {'aoedevpath': aoedevpath, 'tries': tries}) self._aoe_discover() self.tries = self.tries + 1 self.tries = 0 timer = loopingcall.FixedIntervalLoopingCall( _wait_for_device_discovery, aoedevpath, mount_device) timer.start(interval=2).wait() tries = self.tries if tries != 0: LOG.debug(_("Found AoE device %(aoedevpath)s " "(after %(tries)s rediscover)"), {'aoedevpath': aoedevpath, 'tries': tries}) conf = super(LibvirtAOEVolumeDriver, self).connect_volume(connection_info, mount_device) conf.source_type = "block" conf.source_path = aoedevpath return conf class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver): """Class implements libvirt part of volume driver for GlusterFS.""" def __init__(self, connection): """Create back-end to glusterfs.""" super(LibvirtGlusterfsVolumeDriver, self).__init__(connection, is_block_dev=False) def connect_volume(self, connection_info, mount_device): """Connect the volume. 
Returns xml for libvirt.""" conf = super(LibvirtGlusterfsVolumeDriver, self).connect_volume(connection_info, mount_device) data = connection_info['data'] if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers: vol_name = data['export'].split('/')[1] source_host = data['export'].split('/')[0][:-1] conf.source_ports = ['24007'] conf.source_type = 'network' conf.source_protocol = 'gluster' conf.source_hosts = [source_host] conf.source_name = '%s/%s' % (vol_name, data['name']) else: path = self._ensure_mounted(data['export'], data.get('options')) path = os.path.join(path, data['name']) conf.source_type = 'file' conf.source_path = path conf.driver_format = connection_info['data'].get('format', 'raw') return conf def disconnect_volume(self, connection_info, disk_dev): """Disconnect the volume.""" if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers: return export = connection_info['data']['export'] mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base, utils.get_hash_str(export)) try: utils.execute('umount', mount_path, run_as_root=True) except processutils.ProcessExecutionError as exc: if 'target is busy' in exc.message: LOG.debug(_("The GlusterFS share %s is still in use."), export) else: LOG.exception(_("Couldn't unmount the GlusterFS share %s"), export) def _ensure_mounted(self, glusterfs_export, options=None): """@type glusterfs_export: string @type options: string """ mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base, utils.get_hash_str(glusterfs_export)) if not virtutils.is_mounted(mount_path, glusterfs_export): self._mount_glusterfs(mount_path, glusterfs_export, options, ensure=True) return mount_path def _mount_glusterfs(self, mount_path, glusterfs_share, options=None, ensure=False): """Mount glusterfs export to mount path.""" utils.execute('mkdir', '-p', mount_path) gluster_cmd = ['mount', '-t', 'glusterfs'] if options is not None: gluster_cmd.extend(options.split(' ')) gluster_cmd.extend([glusterfs_share, mount_path]) try: utils.execute(*gluster_cmd, run_as_root=True) except processutils.ProcessExecutionError as exc: if ensure and 'already mounted' in exc.message: LOG.warn(_("%s is already mounted"), glusterfs_share) else: raise class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver): """Driver to attach Fibre Channel Network volumes to libvirt.""" def __init__(self, connection): super(LibvirtFibreChannelVolumeDriver, self).__init__(connection, is_block_dev=False) def _get_pci_num(self, hba): # NOTE(walter-boring) # device path is in format of # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2 # sometimes an extra entry exists before the host2 value # we always want the value prior to the host2 value pci_num = None if hba is not None: if "device_path" in hba: index = 0 device_path = hba['device_path'].split('/') for value in device_path: if value.startswith('host'): break index = index + 1 if index > 0: pci_num = device_path[index - 1] return pci_num @utils.synchronized('connect_volume') def connect_volume(self, connection_info, disk_info): """Attach the volume to instance_name.""" fc_properties = connection_info['data'] mount_device = disk_info["dev"] ports = fc_properties['target_wwn'] wwns = [] # we support a list of wwns or a single wwn if isinstance(ports, list): for wwn in ports: wwns.append(str(wwn)) elif isinstance(ports, six.string_types): wwns.append(str(ports)) # We need to look for wwns on every hba # because we don't know ahead of time # where they will show up. 
hbas = virtutils.get_fc_hbas_info() host_devices = [] for hba in hbas: pci_num = self._get_pci_num(hba) if pci_num is not None: for wwn in wwns: target_wwn = "0x%s" % wwn.lower() host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" % (pci_num, target_wwn, fc_properties.get('target_lun', 0))) host_devices.append(host_device) if len(host_devices) == 0: # this is empty because we don't have any FC HBAs msg = _("We are unable to locate any Fibre Channel devices") raise exception.NovaException(msg) # The /dev/disk/by-path/... node is not always present immediately # We only need to find the first device. Once we see the first device # multipath will have any others. def _wait_for_device_discovery(host_devices, mount_device): tries = self.tries for device in host_devices: LOG.debug(_("Looking for Fibre Channel dev %(device)s"), {'device': device}) if os.path.exists(device): self.host_device = device # get the /dev/sdX device. This is used # to find the multipath device. self.device_name = os.path.realpath(device) raise loopingcall.LoopingCallDone() if self.tries >= CONF.libvirt.num_iscsi_scan_tries: msg = _("Fibre Channel device not found.") raise exception.NovaException(msg) LOG.warn(_("Fibre volume not yet found at: %(mount_device)s. " "Will rescan & retry. Try number: %(tries)s"), {'mount_device': mount_device, 'tries': tries}) linuxscsi.rescan_hosts(hbas) self.tries = self.tries + 1 self.host_device = None self.device_name = None self.tries = 0 timer = loopingcall.FixedIntervalLoopingCall( _wait_for_device_discovery, host_devices, mount_device) timer.start(interval=2).wait() tries = self.tries if self.host_device is not None and self.device_name is not None: LOG.debug(_("Found Fibre Channel volume %(mount_device)s " "(after %(tries)s rescans)"), {'mount_device': mount_device, 'tries': tries}) # see if the new drive is part of a multipath # device. If so, we'll use the multipath device. mdev_info = linuxscsi.find_multipath_device(self.device_name) if mdev_info is not None: LOG.debug(_("Multipath device discovered %(device)s") % {'device': mdev_info['device']}) device_path = mdev_info['device'] connection_info['data']['devices'] = mdev_info['devices'] connection_info['data']['multipath_id'] = mdev_info['id'] else: # we didn't find a multipath device. # so we assume the kernel only sees 1 device device_path = self.host_device device_info = linuxscsi.get_device_info(self.device_name) connection_info['data']['devices'] = [device_info] conf = super(LibvirtFibreChannelVolumeDriver, self).connect_volume(connection_info, disk_info) conf.source_type = "block" conf.source_path = device_path return conf @utils.synchronized('connect_volume') def disconnect_volume(self, connection_info, mount_device): """Detach the volume from instance_name.""" super(LibvirtFibreChannelVolumeDriver, self).disconnect_volume(connection_info, mount_device) # If this is a multipath device, we need to search again # and make sure we remove all the devices. Some of them # might not have shown up at attach time. if 'multipath_id' in connection_info['data']: multipath_id = connection_info['data']['multipath_id'] mdev_info = linuxscsi.find_multipath_device(multipath_id) devices = mdev_info['devices'] LOG.debug(_("devices to remove = %s"), devices) else: # only needed when multipath-tools work improperly devices = connection_info['data'].get('devices', []) LOG.warn(_LW("multipath-tools probably work improperly. 
" "devices to remove = %s.") % devices) # There may have been more than 1 device mounted # by the kernel for this volume. We have to remove # all of them for device in devices: linuxscsi.remove_device(device) class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver): """Scality SOFS Nova driver. Provide hypervisors with access to sparse files on SOFS. """ def __init__(self, connection): """Create back-end to SOFS and check connection.""" super(LibvirtScalityVolumeDriver, self).__init__(connection, is_block_dev=False) def connect_volume(self, connection_info, disk_info): """Connect the volume. Returns xml for libvirt.""" self._check_prerequisites() self._mount_sofs() conf = super(LibvirtScalityVolumeDriver, self).connect_volume(connection_info, disk_info) path = os.path.join(CONF.libvirt.scality_sofs_mount_point, connection_info['data']['sofs_path']) conf.source_type = 'file' conf.source_path = path # The default driver cache policy is 'none', and this causes # qemu/kvm to open the volume file with O_DIRECT, which is # rejected by FUSE (on kernels older than 3.3). Scality SOFS # is FUSE based, so we must provide a more sensible default. conf.driver_cache = 'writethrough' return conf def _check_prerequisites(self): """Sanity checks before attempting to mount SOFS.""" # config is mandatory config = CONF.libvirt.scality_sofs_config if not config: msg = _("Value required for 'scality_sofs_config'") LOG.warn(msg) raise exception.NovaException(msg) # config can be a file path or a URL, check it if urlparse.urlparse(config).scheme == '': # turn local path into URL config = 'file://%s' % config try: urllib2.urlopen(config, timeout=5).close() except urllib2.URLError as e: msg = _("Cannot access 'scality_sofs_config': %s") % e LOG.warn(msg) raise exception.NovaException(msg) # mount.sofs must be installed if not os.access('/sbin/mount.sofs', os.X_OK): msg = _("Cannot execute /sbin/mount.sofs") LOG.warn(msg) raise exception.NovaException(msg) def _mount_sofs(self): config = CONF.libvirt.scality_sofs_config mount_path = CONF.libvirt.scality_sofs_mount_point sysdir = os.path.join(mount_path, 'sys') if not os.path.isdir(mount_path): utils.execute('mkdir', '-p', mount_path) if not os.path.isdir(sysdir): utils.execute('mount', '-t', 'sofs', config, mount_path, run_as_root=True) if not os.path.isdir(sysdir): msg = _("Cannot mount Scality SOFS, check syslog for errors") LOG.warn(msg) raise exception.NovaException(msg) nova-2014.1.5/nova/virt/libvirt/imagecache.py0000664000567000056700000005557412540642544022117 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Image cache manager. The cache manager implements the specification at http://wiki.openstack.org/nova-image-cache-management. 
""" import hashlib import json import os import re import time from oslo.config import cfg from nova.openstack.common import fileutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import utils from nova.virt import imagecache from nova.virt.libvirt import utils as virtutils LOG = logging.getLogger(__name__) imagecache_opts = [ cfg.StrOpt('image_info_filename_pattern', default='$instances_path/$image_cache_subdirectory_name/' '%(image)s.info', help='Allows image information files to be stored in ' 'non-standard locations', deprecated_group='DEFAULT'), cfg.BoolOpt('remove_unused_kernels', default=False, help='Should unused kernel images be removed? This is only ' 'safe to enable if all compute nodes have been updated ' 'to support this option. This will be enabled by default ' 'in future.', deprecated_group='DEFAULT'), cfg.IntOpt('remove_unused_resized_minimum_age_seconds', default=3600, help='Unused resized base images younger than this will not be ' 'removed', deprecated_group='DEFAULT'), cfg.BoolOpt('checksum_base_images', default=False, help='Write a checksum for files in _base to disk', deprecated_group='DEFAULT'), cfg.IntOpt('checksum_interval_seconds', default=3600, help='How frequently to checksum base images', deprecated_group='DEFAULT'), ] CONF = cfg.CONF CONF.register_opts(imagecache_opts, 'libvirt') CONF.import_opt('instances_path', 'nova.compute.manager') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') def get_cache_fname(images, key): """Return a filename based on the SHA1 hash of a given image ID. Image files stored in the _base directory that match this pattern are considered for cleanup by the image cache manager. The cache manager considers the file to be in use if it matches an instance's image_ref, kernel_id or ramdisk_id property. However, in grizzly-3 and before, only the image_ref property was considered. This means that it's unsafe to store kernel and ramdisk images using this pattern until we're sure that all compute nodes are running a cache manager newer than grizzly-3. For now, we require admins to confirm that by setting the remove_unused_kernels boolean but, at some point in the future, we'll be safely able to assume this. """ image_id = str(images[key]) if ((not CONF.libvirt.remove_unused_kernels and key in ['kernel_id', 'ramdisk_id'])): return image_id else: return hashlib.sha1(image_id).hexdigest() def get_info_filename(base_path): """Construct a filename for storing additional information about a base image. Returns a filename. """ base_file = os.path.basename(base_path) return (CONF.libvirt.image_info_filename_pattern % {'image': base_file}) def is_valid_info_file(path): """Test if a given path matches the pattern for info files.""" digest_size = hashlib.sha1().digestsize * 2 regexp = (CONF.libvirt.image_info_filename_pattern % {'image': ('([0-9a-f]{%(digest_size)d}|' '[0-9a-f]{%(digest_size)d}_sm|' '[0-9a-f]{%(digest_size)d}_[0-9]+)' % {'digest_size': digest_size})}) m = re.match(regexp, path) if m: return True return False def _read_possible_json(serialized, info_file): try: d = jsonutils.loads(serialized) except ValueError as e: LOG.error(_('Error reading image info file %(filename)s: ' '%(error)s'), {'filename': info_file, 'error': e}) d = {} return d def read_stored_info(target, field=None, timestamped=False): """Read information about an image. 
Returns an empty dictionary if there is no info, just the field value if a field is requested, or the entire dictionary otherwise. """ info_file = get_info_filename(target) if not os.path.exists(info_file): # NOTE(mikal): Special case to handle essex checksums being converted. # There is an assumption here that target is a base image filename. old_filename = target + '.sha1' if field == 'sha1' and os.path.exists(old_filename): hash_file = open(old_filename) hash_value = hash_file.read() hash_file.close() write_stored_info(target, field=field, value=hash_value) os.remove(old_filename) d = {field: hash_value} else: d = {} else: lock_name = 'info-%s' % os.path.split(target)[-1] lock_path = os.path.join(CONF.instances_path, 'locks') @utils.synchronized(lock_name, external=True, lock_path=lock_path) def read_file(info_file): LOG.debug(_('Reading image info file: %s'), info_file) with open(info_file, 'r') as f: return f.read().rstrip() serialized = read_file(info_file) d = _read_possible_json(serialized, info_file) if field: if timestamped: return (d.get(field, None), d.get('%s-timestamp' % field, None)) else: return d.get(field, None) return d def write_stored_info(target, field=None, value=None): """Write information about an image.""" if not field: return info_file = get_info_filename(target) LOG.info(_('Writing stored info to %s'), info_file) fileutils.ensure_tree(os.path.dirname(info_file)) lock_name = 'info-%s' % os.path.split(target)[-1] lock_path = os.path.join(CONF.instances_path, 'locks') @utils.synchronized(lock_name, external=True, lock_path=lock_path) def write_file(info_file, field, value): d = {} if os.path.exists(info_file): with open(info_file, 'r') as f: d = _read_possible_json(f.read(), info_file) d[field] = value d['%s-timestamp' % field] = time.time() with open(info_file, 'w') as f: f.write(json.dumps(d)) write_file(info_file, field, value) def _hash_file(filename): """Generate a hash for the contents of a file.""" checksum = hashlib.sha1() with open(filename) as f: for chunk in iter(lambda: f.read(32768), b''): checksum.update(chunk) return checksum.hexdigest() def read_stored_checksum(target, timestamped=True): """Read the checksum. Returns the checksum (as hex) or None. """ return read_stored_info(target, field='sha1', timestamped=timestamped) def write_stored_checksum(target): """Write a checksum to disk for a file in _base.""" write_stored_info(target, field='sha1', value=_hash_file(target)) class ImageCacheManager(imagecache.ImageCacheManager): def __init__(self): super(ImageCacheManager, self).__init__() self.lock_path = os.path.join(CONF.instances_path, 'locks') self._reset_state() def _reset_state(self): """Reset state variables used for each pass.""" self.used_images = {} self.image_popularity = {} self.instance_names = set() self.active_base_files = [] self.corrupt_base_files = [] self.originals = [] self.removable_base_files = [] self.unexplained_images = [] def _store_image(self, base_dir, ent, original=False): """Store a base image for later examination.""" entpath = os.path.join(base_dir, ent) if os.path.isfile(entpath): self.unexplained_images.append(entpath) if original: self.originals.append(entpath) def _list_base_images(self, base_dir): """Return a list of the images present in _base. Determine what images we have on disk. There will be other files in this directory so we only grab the ones which are the right length to be disk images. 
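For example (illustrative name), with SHA-1 hashing a 40-hex-character entry such as 'da39a3ee5e6b4b0d3255bfef95601890afd80709' is recorded as an original cached image, while entries of the form '<hash>_<size>' or '<hash>_sm' are recorded as resized or legacy small variants.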
""" digest_size = hashlib.sha1().digestsize * 2 for ent in os.listdir(base_dir): if len(ent) == digest_size: self._store_image(base_dir, ent, original=True) elif (len(ent) > digest_size + 2 and ent[digest_size] == '_' and not is_valid_info_file(os.path.join(base_dir, ent))): self._store_image(base_dir, ent, original=False) return {'unexplained_images': self.unexplained_images, 'originals': self.originals} def _list_backing_images(self): """List the backing images currently in use.""" inuse_images = [] for ent in os.listdir(CONF.instances_path): if ent in self.instance_names: LOG.debug(_('%s is a valid instance name'), ent) disk_path = os.path.join(CONF.instances_path, ent, 'disk') if os.path.exists(disk_path): LOG.debug(_('%s has a disk file'), ent) try: backing_file = virtutils.get_disk_backing_file( disk_path) except processutils.ProcessExecutionError: # (for bug 1261442) if not os.path.exists(disk_path): LOG.debug(_('Failed to get disk backing file: %s'), disk_path) continue else: raise LOG.debug(_('Instance %(instance)s is backed by ' '%(backing)s'), {'instance': ent, 'backing': backing_file}) if backing_file: backing_path = os.path.join( CONF.instances_path, CONF.image_cache_subdirectory_name, backing_file) if backing_path not in inuse_images: inuse_images.append(backing_path) if backing_path in self.unexplained_images: LOG.warning(_('Instance %(instance)s is using a ' 'backing file %(backing)s which ' 'does not appear in the image ' 'service'), {'instance': ent, 'backing': backing_file}) self.unexplained_images.remove(backing_path) return inuse_images def _find_base_file(self, base_dir, fingerprint): """Find the base file matching this fingerprint. Yields the name of the base file, a boolean which is True if the image is "small", and a boolean which indicates if this is a resized image. Note that is is possible for more than one yield to result from this check. If no base file is found, then nothing is yielded. """ # The original file from glance base_file = os.path.join(base_dir, fingerprint) if os.path.exists(base_file): yield base_file, False, False # An older naming style which can be removed sometime after Folsom base_file = os.path.join(base_dir, fingerprint + '_sm') if os.path.exists(base_file): yield base_file, True, False # Resized images resize_re = re.compile('.*/%s_[0-9]+$' % fingerprint) for img in self.unexplained_images: m = resize_re.match(img) if m: yield img, False, True def _verify_checksum(self, img_id, base_file, create_if_missing=True): """Compare the checksum stored on disk with the current file. Note that if the checksum fails to verify this is logged, but no actual action occurs. This is something sysadmins should monitor for and handle manually when it occurs. """ if not CONF.libvirt.checksum_base_images: return None lock_name = 'hash-%s' % os.path.split(base_file)[-1] # Protect against other nova-computes performing checksums at the same # time if we are using shared storage @utils.synchronized(lock_name, external=True, lock_path=self.lock_path) def inner_verify_checksum(): (stored_checksum, stored_timestamp) = read_stored_checksum( base_file, timestamped=True) if stored_checksum: # NOTE(mikal): Checksums are timestamped. If we have recently # checksummed (possibly on another compute node if we are using # shared storage), then we don't need to checksum again. 
if (stored_timestamp and time.time() - stored_timestamp < CONF.libvirt.checksum_interval_seconds): return True # NOTE(mikal): If there is no timestamp, then the checksum was # performed by a previous version of the code. if not stored_timestamp: write_stored_info(base_file, field='sha1', value=stored_checksum) current_checksum = _hash_file(base_file) if current_checksum != stored_checksum: LOG.error(_('image %(id)s at (%(base_file)s): image ' 'verification failed'), {'id': img_id, 'base_file': base_file}) return False else: return True else: LOG.info(_('image %(id)s at (%(base_file)s): image ' 'verification skipped, no hash stored'), {'id': img_id, 'base_file': base_file}) # NOTE(mikal): If the checksum file is missing, then we should # create one. We don't create checksums when we download images # from glance because that would delay VM startup. if CONF.libvirt.checksum_base_images and create_if_missing: LOG.info(_('%(id)s (%(base_file)s): generating checksum'), {'id': img_id, 'base_file': base_file}) write_stored_checksum(base_file) return None return inner_verify_checksum() def _remove_base_file(self, base_file): """Remove a single base file if it is old enough. Returns nothing. """ if not os.path.exists(base_file): LOG.debug(_('Cannot remove %s, it does not exist'), base_file) return mtime = os.path.getmtime(base_file) age = time.time() - mtime maxage = CONF.libvirt.remove_unused_resized_minimum_age_seconds if base_file in self.originals: maxage = CONF.remove_unused_original_minimum_age_seconds if age < maxage: LOG.info(_('Base file too young to remove: %s'), base_file) else: LOG.info(_('Removing base file: %s'), base_file) try: os.remove(base_file) signature = get_info_filename(base_file) if os.path.exists(signature): os.remove(signature) except OSError as e: LOG.error(_('Failed to remove %(base_file)s, ' 'error was %(error)s'), {'base_file': base_file, 'error': e}) def _handle_base_image(self, img_id, base_file): """Handle the checks for a single base image.""" image_bad = False image_in_use = False LOG.info(_('image %(id)s at (%(base_file)s): checking'), {'id': img_id, 'base_file': base_file}) if base_file in self.unexplained_images: self.unexplained_images.remove(base_file) if (base_file and os.path.exists(base_file) and os.path.isfile(base_file)): # _verify_checksum returns True if the checksum is ok, and None if # there is no checksum file checksum_result = self._verify_checksum(img_id, base_file) if checksum_result is not None: image_bad = not checksum_result # Give other threads a chance to run time.sleep(0) instances = [] if img_id in self.used_images: local, remote, instances = self.used_images[img_id] if local > 0 or remote > 0: image_in_use = True LOG.info(_('image %(id)s at (%(base_file)s): ' 'in use: on this node %(local)d local, ' '%(remote)d on other nodes sharing this instance ' 'storage'), {'id': img_id, 'base_file': base_file, 'local': local, 'remote': remote}) self.active_base_files.append(base_file) if not base_file: LOG.warning(_('image %(id)s at (%(base_file)s): warning ' '-- an absent base file is in use! 
' 'instances: %(instance_list)s'), {'id': img_id, 'base_file': base_file, 'instance_list': ' '.join(instances)}) if image_bad: self.corrupt_base_files.append(base_file) if base_file: if not image_in_use: LOG.debug(_('image %(id)s at (%(base_file)s): image is not in ' 'use'), {'id': img_id, 'base_file': base_file}) self.removable_base_files.append(base_file) else: LOG.debug(_('image %(id)s at (%(base_file)s): image is in ' 'use'), {'id': img_id, 'base_file': base_file}) if os.path.exists(base_file): virtutils.chown(base_file, os.getuid()) os.utime(base_file, None) def _age_and_verify_cached_images(self, context, all_instances, base_dir): LOG.debug(_('Verify base images')) # Determine what images are on disk because they're in use for img in self.used_images: fingerprint = hashlib.sha1(img).hexdigest() LOG.debug(_('Image id %(id)s yields fingerprint %(fingerprint)s'), {'id': img, 'fingerprint': fingerprint}) for result in self._find_base_file(base_dir, fingerprint): base_file, image_small, image_resized = result self._handle_base_image(img, base_file) if not image_small and not image_resized: self.originals.append(base_file) # Elements remaining in unexplained_images might be in use inuse_backing_images = self._list_backing_images() for backing_path in inuse_backing_images: if backing_path not in self.active_base_files: self.active_base_files.append(backing_path) # Anything left is an unknown base image for img in self.unexplained_images: LOG.warning(_('Unknown base file: %s'), img) self.removable_base_files.append(img) # Dump these lists if self.active_base_files: LOG.info(_('Active base files: %s'), ' '.join(self.active_base_files)) if self.corrupt_base_files: LOG.info(_('Corrupt base files: %s'), ' '.join(self.corrupt_base_files)) if self.removable_base_files: LOG.info(_('Removable base files: %s'), ' '.join(self.removable_base_files)) if self.remove_unused_base_images: for base_file in self.removable_base_files: self._remove_base_file(base_file) # That's it LOG.debug(_('Verification complete')) def _get_base(self): # NOTE(mikal): The new scheme for base images is as follows -- an # image is streamed from the image service to _base (filename is the # sha1 hash of the image id). If CoW is enabled, that file is then # resized to be the correct size for the instance (filename is the # same as the original, but with an underscore and the resized size # in bytes). This second file is then CoW'd to the instance disk. If # CoW is disabled, the resize occurs as part of the copy from the # cache to the instance directory. Files ending in _sm are no longer # created, but may remain from previous versions. 
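# As an illustrative sketch (hypothetical hash shown), _base can
# therefore contain entries such as:
#   29d...c61              the original image streamed from glance
#   29d...c61_10737418240  the same image resized to 10 GiB for CoW backing
#   29d...c61_sm           a legacy 'small' image left by older releases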
base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if not os.path.exists(base_dir): LOG.debug(_('Skipping verification, no base directory at %s'), base_dir) return return base_dir def update(self, context, all_instances): base_dir = self._get_base() if not base_dir: return # reset the local statistics self._reset_state() # read the cached images self._list_base_images(base_dir) # read running instances data running = self._list_running_instances(context, all_instances) self.used_images = running['used_images'] self.image_popularity = running['image_popularity'] self.instance_names = running['instance_names'] # perform the aging and image verification self._age_and_verify_cached_images(context, all_instances, base_dir) nova-2014.1.5/nova/virt/libvirt/config.py0000664000567000056700000013403112540642544021300 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012-2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Configuration for libvirt objects. Classes to represent the configuration of various libvirt objects and support conversion to/from XML. These classes are solely concerned with providing direct Object <-> XML document conversions. No policy or operational decisions should be made by code in these classes. Such policy belongs in the 'designer.py' module which provides simplified helpers for populating config object instances.
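As a minimal illustrative sketch (the values are arbitrary), a guest
disk can be described and serialized like this:

    disk = LibvirtConfigGuestDisk()
    disk.source_type = 'file'
    disk.source_path = '/var/lib/nova/instances/example/disk'
    disk.target_dev = 'vda'
    disk.target_bus = 'virtio'
    xml = disk.to_xml()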
""" from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import units from lxml import etree LOG = logging.getLogger(__name__) class LibvirtConfigObject(object): def __init__(self, **kwargs): super(LibvirtConfigObject, self).__init__() self.root_name = kwargs.get("root_name") self.ns_prefix = kwargs.get('ns_prefix') self.ns_uri = kwargs.get('ns_uri') @staticmethod def _text_node(name, value): child = etree.Element(name) child.text = str(value) return child def format_dom(self): if self.ns_uri is None: return etree.Element(self.root_name) else: return etree.Element("{" + self.ns_uri + "}" + self.root_name, nsmap={self.ns_prefix: self.ns_uri}) def parse_str(self, xmlstr): self.parse_dom(etree.fromstring(xmlstr)) def parse_dom(self, xmldoc): if self.root_name != xmldoc.tag: raise exception.InvalidInput( "Root element name should be '%s' not '%s'" % (self.root_name, xmldoc.tag)) def to_xml(self, pretty_print=True): root = self.format_dom() xml_str = etree.tostring(root, pretty_print=pretty_print) LOG.debug(_("Generated XML %s "), (xml_str,)) return xml_str class LibvirtConfigCaps(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCaps, self).__init__(root_name="capabilities", **kwargs) self.host = None self.guests = [] def parse_dom(self, xmldoc): super(LibvirtConfigCaps, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "host": host = LibvirtConfigCapsHost() host.parse_dom(c) self.host = host elif c.tag == "guest": guest = LibvirtConfigCapsGuest() guest.parse_dom(c) self.guests.append(guest) def format_dom(self): caps = super(LibvirtConfigCaps, self).format_dom() if self.host: caps.append(self.host.format_dom()) for g in self.guests: caps.append(g.format_dom()) return caps class LibvirtConfigCapsHost(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsHost, self).__init__(root_name="host", **kwargs) self.cpu = None self.uuid = None def parse_dom(self, xmldoc): super(LibvirtConfigCapsHost, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "cpu": cpu = LibvirtConfigCPU() cpu.parse_dom(c) self.cpu = cpu elif c.tag == "uuid": self.uuid = c.text def format_dom(self): caps = super(LibvirtConfigCapsHost, self).format_dom() if self.uuid: caps.append(self._text_node("uuid", self.uuid)) if self.cpu: caps.append(self.cpu.format_dom()) return caps class LibvirtConfigCapsGuest(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCapsGuest, self).__init__(root_name="guest", **kwargs) self.arch = None self.ostype = None self.domtype = list() def parse_dom(self, xmldoc): super(LibvirtConfigCapsGuest, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "os_type": self.ostype = c.text elif c.tag == "arch": self.arch = c.get("name") for sc in c.getchildren(): if sc.tag == "domain": self.domtype.append(sc.get("type")) def format_dom(self): caps = super(LibvirtConfigCapsGuest, self).format_dom() if self.ostype is not None: caps.append(self._text_node("os_type", self.ostype)) if self.arch: arch = etree.Element("arch", name=self.arch) for dt in self.domtype: dte = etree.Element("domain") dte.set("type", dt) arch.append(dte) caps.append(arch) return caps class LibvirtConfigGuestTimer(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestTimer, self).__init__(root_name="timer", **kwargs) self.name = "platform" self.track = None self.tickpolicy = None self.present = None def 
format_dom(self): tm = super(LibvirtConfigGuestTimer, self).format_dom() tm.set("name", self.name) if self.track is not None: tm.set("track", self.track) if self.tickpolicy is not None: tm.set("tickpolicy", self.tickpolicy) if self.present is not None: if self.present: tm.set("present", "yes") else: tm.set("present", "no") return tm class LibvirtConfigGuestClock(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestClock, self).__init__(root_name="clock", **kwargs) self.offset = "utc" self.adjustment = None self.timezone = None self.timers = [] def format_dom(self): clk = super(LibvirtConfigGuestClock, self).format_dom() clk.set("offset", self.offset) if self.adjustment: clk.set("adjustment", self.adjustment) elif self.timezone: clk.set("timezone", self.timezone) for tm in self.timers: clk.append(tm.format_dom()) return clk def add_timer(self, tm): self.timers.append(tm) class LibvirtConfigCPUFeature(LibvirtConfigObject): def __init__(self, name=None, **kwargs): super(LibvirtConfigCPUFeature, self).__init__(root_name='feature', **kwargs) self.name = name def parse_dom(self, xmldoc): super(LibvirtConfigCPUFeature, self).parse_dom(xmldoc) self.name = xmldoc.get("name") def format_dom(self): ft = super(LibvirtConfigCPUFeature, self).format_dom() ft.set("name", self.name) return ft def __eq__(self, obj): return obj.name == self.name def __ne__(self, obj): return obj.name != self.name def __hash__(self): return hash(self.name) class LibvirtConfigCPU(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigCPU, self).__init__(root_name='cpu', **kwargs) self.arch = None self.vendor = None self.model = None self.sockets = None self.cores = None self.threads = None self.features = set() def parse_dom(self, xmldoc): super(LibvirtConfigCPU, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "arch": self.arch = c.text elif c.tag == "model": self.model = c.text elif c.tag == "vendor": self.vendor = c.text elif c.tag == "topology": self.sockets = int(c.get("sockets")) self.cores = int(c.get("cores")) self.threads = int(c.get("threads")) elif c.tag == "feature": f = LibvirtConfigCPUFeature() f.parse_dom(c) self.add_feature(f) def format_dom(self): cpu = super(LibvirtConfigCPU, self).format_dom() if self.arch is not None: cpu.append(self._text_node("arch", self.arch)) if self.model is not None: cpu.append(self._text_node("model", self.model)) if self.vendor is not None: cpu.append(self._text_node("vendor", self.vendor)) if (self.sockets is not None and self.cores is not None and self.threads is not None): top = etree.Element("topology") top.set("sockets", str(self.sockets)) top.set("cores", str(self.cores)) top.set("threads", str(self.threads)) cpu.append(top) # sorting the features to allow more predictable tests for f in sorted(self.features, key=lambda x: x.name): cpu.append(f.format_dom()) return cpu def add_feature(self, feat): self.features.add(feat) class LibvirtConfigGuestCPUFeature(LibvirtConfigCPUFeature): def __init__(self, name=None, **kwargs): super(LibvirtConfigGuestCPUFeature, self).__init__(name, **kwargs) self.policy = "require" def format_dom(self): ft = super(LibvirtConfigGuestCPUFeature, self).format_dom() ft.set("policy", self.policy) return ft class LibvirtConfigGuestCPU(LibvirtConfigCPU): def __init__(self, **kwargs): super(LibvirtConfigGuestCPU, self).__init__(**kwargs) self.mode = None self.match = "exact" def parse_dom(self, xmldoc): super(LibvirtConfigGuestCPU, self).parse_dom(xmldoc) self.mode = xmldoc.get('mode') self.match = 
xmldoc.get('match') def format_dom(self): cpu = super(LibvirtConfigGuestCPU, self).format_dom() if self.mode: cpu.set("mode", self.mode) cpu.set("match", self.match) return cpu class LibvirtConfigGuestSMBIOS(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestSMBIOS, self).__init__(root_name="smbios", **kwargs) self.mode = "sysinfo" def format_dom(self): smbios = super(LibvirtConfigGuestSMBIOS, self).format_dom() smbios.set("mode", self.mode) return smbios class LibvirtConfigGuestSysinfo(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestSysinfo, self).__init__(root_name="sysinfo", **kwargs) self.type = "smbios" self.bios_vendor = None self.bios_version = None self.system_manufacturer = None self.system_product = None self.system_version = None self.system_serial = None self.system_uuid = None def format_dom(self): sysinfo = super(LibvirtConfigGuestSysinfo, self).format_dom() sysinfo.set("type", self.type) bios = None system = None if self.bios_vendor is not None: if bios is None: bios = etree.Element("bios") info = etree.Element("entry", name="vendor") info.text = self.bios_vendor bios.append(info) if self.bios_version is not None: if bios is None: bios = etree.Element("bios") info = etree.Element("entry", name="version") info.text = self.bios_version bios.append(info) if self.system_manufacturer is not None: if system is None: system = etree.Element("system") info = etree.Element("entry", name="manufacturer") info.text = self.system_manufacturer system.append(info) if self.system_product is not None: if system is None: system = etree.Element("system") info = etree.Element("entry", name="product") info.text = self.system_product system.append(info) if self.system_version is not None: if system is None: system = etree.Element("system") info = etree.Element("entry", name="version") info.text = self.system_version system.append(info) if self.system_serial is not None: if system is None: system = etree.Element("system") info = etree.Element("entry", name="serial") info.text = self.system_serial system.append(info) if self.system_uuid is not None: if system is None: system = etree.Element("system") info = etree.Element("entry", name="uuid") info.text = self.system_uuid system.append(info) if bios is not None: sysinfo.append(bios) if system is not None: sysinfo.append(system) return sysinfo class LibvirtConfigGuestDevice(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestDevice, self).__init__(**kwargs) class LibvirtConfigGuestDisk(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestDisk, self).__init__(root_name="disk", **kwargs) self.source_type = "file" self.source_device = "disk" self.driver_name = None self.driver_format = None self.driver_cache = None self.source_path = None self.source_protocol = None self.source_name = None self.source_hosts = [] self.source_ports = [] self.target_dev = None self.target_path = None self.target_bus = None self.auth_username = None self.auth_secret_type = None self.auth_secret_uuid = None self.serial = None self.disk_read_bytes_sec = None self.disk_read_iops_sec = None self.disk_write_bytes_sec = None self.disk_write_iops_sec = None self.disk_total_bytes_sec = None self.disk_total_iops_sec = None self.logical_block_size = None self.physical_block_size = None self.readonly = False self.snapshot = None def format_dom(self): dev = super(LibvirtConfigGuestDisk, self).format_dom() dev.set("type", self.source_type) dev.set("device", self.source_device) if 
(self.driver_name is not None or self.driver_format is not None or self.driver_cache is not None): drv = etree.Element("driver") if self.driver_name is not None: drv.set("name", self.driver_name) if self.driver_format is not None: drv.set("type", self.driver_format) if self.driver_cache is not None: drv.set("cache", self.driver_cache) dev.append(drv) if self.source_type == "file": dev.append(etree.Element("source", file=self.source_path)) elif self.source_type == "block": dev.append(etree.Element("source", dev=self.source_path)) elif self.source_type == "mount": dev.append(etree.Element("source", dir=self.source_path)) elif self.source_type == "network": source = etree.Element("source", protocol=self.source_protocol) if self.source_name is not None: source.set('name', self.source_name) hosts_info = zip(self.source_hosts, self.source_ports) for name, port in hosts_info: host = etree.Element('host', name=name) if port is not None: host.set('port', port) source.append(host) dev.append(source) if self.auth_secret_type is not None: auth = etree.Element("auth") auth.set("username", self.auth_username) auth.append(etree.Element("secret", type=self.auth_secret_type, uuid=self.auth_secret_uuid)) dev.append(auth) if self.source_type == "mount": dev.append(etree.Element("target", dir=self.target_path)) else: dev.append(etree.Element("target", dev=self.target_dev, bus=self.target_bus)) if self.serial is not None: dev.append(self._text_node("serial", self.serial)) iotune = etree.Element("iotune") if self.disk_read_bytes_sec is not None: iotune.append(self._text_node("read_bytes_sec", self.disk_read_bytes_sec)) if self.disk_read_iops_sec is not None: iotune.append(self._text_node("read_iops_sec", self.disk_read_iops_sec)) if self.disk_write_bytes_sec is not None: iotune.append(self._text_node("write_bytes_sec", self.disk_write_bytes_sec)) if self.disk_write_iops_sec is not None: iotune.append(self._text_node("write_iops_sec", self.disk_write_iops_sec)) if self.disk_total_bytes_sec is not None: iotune.append(self._text_node("total_bytes_sec", self.disk_total_bytes_sec)) if self.disk_total_iops_sec is not None: iotune.append(self._text_node("total_iops_sec", self.disk_total_iops_sec)) if len(iotune) > 0: dev.append(iotune) # Block size tuning if (self.logical_block_size is not None or self.physical_block_size is not None): blockio = etree.Element("blockio") if self.logical_block_size is not None: blockio.set('logical_block_size', self.logical_block_size) if self.physical_block_size is not None: blockio.set('physical_block_size', self.physical_block_size) dev.append(blockio) if self.readonly: dev.append(etree.Element("readonly")) return dev def parse_dom(self, xmldoc): super(LibvirtConfigGuestDisk, self).parse_dom(xmldoc) self.source_type = xmldoc.get('type') self.snapshot = xmldoc.get('snapshot') for c in xmldoc.getchildren(): if c.tag == 'driver': self.driver_name = c.get('name') self.driver_format = c.get('type') self.driver_cache = c.get('cache') elif c.tag == 'source': if self.source_type == 'file': self.source_path = c.get('file') elif self.source_type == 'block': self.source_path = c.get('dev') elif self.source_type == 'mount': self.source_path = c.get('dir') elif self.source_type == 'network': self.source_protocol = c.get('protocol') self.source_name = c.get('name') elif c.tag == 'serial': self.serial = c.text for c in xmldoc.getchildren(): if c.tag == 'target': if self.source_type == 'mount': self.target_path = c.get('dir') else: self.target_dev = c.get('dev') self.target_bus = c.get('bus', None) 
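# Illustrative example (arbitrary values): a file-backed virtio disk with a
# read-throughput cap set via disk_read_bytes_sec serializes along the lines
# of:
#
#   <disk type="file" device="disk">
#     <driver name="qemu" type="qcow2" cache="none"/>
#     <source file="/var/lib/nova/instances/example/disk"/>
#     <target dev="vda" bus="virtio"/>
#     <iotune>
#       <read_bytes_sec>10485760</read_bytes_sec>
#     </iotune>
#   </disk>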
class LibvirtConfigGuestSnapshotDisk(LibvirtConfigObject): """Disk class for handling disk information in snapshots. Similar to LibvirtConfigGuestDisk, but used to represent disk entities in structures rather than real devices. These typically have fewer members, and different expectations for which fields are required. """ def __init__(self, **kwargs): super(LibvirtConfigGuestSnapshotDisk, self).__init__(root_name="disk", **kwargs) self.source_type = None self.source_device = None self.name = None self.snapshot = None self.driver_name = None self.driver_format = None self.driver_cache = None self.source_path = None self.source_protocol = None self.source_name = None self.source_hosts = [] self.source_ports = [] self.target_dev = None self.target_path = None self.target_bus = None self.auth_username = None self.auth_secret_type = None self.auth_secret_uuid = None self.serial = None def format_dom(self): dev = super(LibvirtConfigGuestSnapshotDisk, self).format_dom() if self.name: dev.attrib['name'] = self.name if self.snapshot: dev.attrib['snapshot'] = self.snapshot if self.source_type: dev.set("type", self.source_type) if self.source_device: dev.set("device", self.source_device) if (self.driver_name is not None or self.driver_format is not None or self.driver_cache is not None): drv = etree.Element("driver") if self.driver_name is not None: drv.set("name", self.driver_name) if self.driver_format is not None: drv.set("type", self.driver_format) if self.driver_cache is not None: drv.set("cache", self.driver_cache) dev.append(drv) if self.source_type == "file": dev.append(etree.Element("source", file=self.source_path)) elif self.source_type == "block": dev.append(etree.Element("source", dev=self.source_path)) elif self.source_type == "mount": dev.append(etree.Element("source", dir=self.source_path)) elif self.source_type == "network": source = etree.Element("source", protocol=self.source_protocol) if self.source_name is not None: source.set('name', self.source_name) hosts_info = zip(self.source_hosts, self.source_ports) for name, port in hosts_info: host = etree.Element('host', name=name) if port is not None: host.set('port', port) source.append(host) dev.append(source) if self.auth_secret_type is not None: auth = etree.Element("auth") auth.set("username", self.auth_username) auth.append(etree.Element("secret", type=self.auth_secret_type, uuid=self.auth_secret_uuid)) dev.append(auth) if self.source_type == "mount": dev.append(etree.Element("target", dir=self.target_path)) else: if self.target_bus and self.target_dev: dev.append(etree.Element("target", dev=self.target_dev, bus=self.target_bus)) return dev def parse_dom(self, xmldoc): super(LibvirtConfigGuestSnapshotDisk, self).parse_dom(xmldoc) self.source_type = xmldoc.get('type') self.snapshot = xmldoc.get('snapshot') for c in xmldoc.getchildren(): if c.tag == 'driver': self.driver_name = c.get('name') self.driver_format = c.get('type') self.driver_cache = c.get('cache') elif c.tag == 'source': if self.source_type == 'file': self.source_path = c.get('file') elif self.source_type == 'block': self.source_path = c.get('dev') elif self.source_type == 'mount': self.source_path = c.get('dir') elif self.source_type == 'network': self.source_protocol = c.get('protocol') self.source_name = c.get('name') elif c.tag == 'serial': self.serial = c.text for c in xmldoc.getchildren(): if c.tag == 'target': if self.source_type == 'mount': self.target_path = c.get('dir') else: self.target_dev = c.get('dev') self.target_bus = c.get('bus', None) class 
LibvirtConfigGuestFilesys(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestFilesys, self).__init__(root_name="filesystem", **kwargs) self.source_type = "mount" self.source_dir = None self.target_dir = "/" def format_dom(self): dev = super(LibvirtConfigGuestFilesys, self).format_dom() dev.set("type", self.source_type) dev.append(etree.Element("source", dir=self.source_dir)) dev.append(etree.Element("target", dir=self.target_dir)) return dev class LibvirtConfigGuestInterface(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestInterface, self).__init__( root_name="interface", **kwargs) self.net_type = None self.target_dev = None self.model = None self.mac_addr = None self.script = None self.source_dev = None self.source_mode = "private" self.vporttype = None self.vportparams = [] self.filtername = None self.filterparams = [] self.driver_name = None self.vif_inbound_peak = None self.vif_inbound_burst = None self.vif_inbound_average = None self.vif_outbound_peak = None self.vif_outbound_burst = None self.vif_outbound_average = None def format_dom(self): dev = super(LibvirtConfigGuestInterface, self).format_dom() dev.set("type", self.net_type) dev.append(etree.Element("mac", address=self.mac_addr)) if self.model: dev.append(etree.Element("model", type=self.model)) if self.driver_name: dev.append(etree.Element("driver", name=self.driver_name)) if self.net_type == "ethernet": if self.script is not None: dev.append(etree.Element("script", path=self.script)) elif self.net_type == "direct": dev.append(etree.Element("source", dev=self.source_dev, mode=self.source_mode)) else: dev.append(etree.Element("source", bridge=self.source_dev)) if self.target_dev is not None: dev.append(etree.Element("target", dev=self.target_dev)) if self.vporttype is not None: vport = etree.Element("virtualport", type=self.vporttype) for p in self.vportparams: param = etree.Element("parameters") param.set(p['key'], p['value']) vport.append(param) dev.append(vport) if self.filtername is not None: filter = etree.Element("filterref", filter=self.filtername) for p in self.filterparams: filter.append(etree.Element("parameter", name=p['key'], value=p['value'])) dev.append(filter) if self.vif_inbound_average or self.vif_outbound_average: bandwidth = etree.Element("bandwidth") if self.vif_inbound_average is not None: vif_inbound = etree.Element("inbound", average=str(self.vif_inbound_average)) if self.vif_inbound_peak is not None: vif_inbound.set("peak", str(self.vif_inbound_peak)) if self.vif_inbound_burst is not None: vif_inbound.set("burst", str(self.vif_inbound_burst)) bandwidth.append(vif_inbound) if self.vif_outbound_average is not None: vif_outbound = etree.Element("outbound", average=str(self.vif_outbound_average)) if self.vif_outbound_peak is not None: vif_outbound.set("peak", str(self.vif_outbound_peak)) if self.vif_outbound_burst is not None: vif_outbound.set("burst", str(self.vif_outbound_burst)) bandwidth.append(vif_outbound) dev.append(bandwidth) return dev def add_filter_param(self, key, value): self.filterparams.append({'key': key, 'value': value}) def add_vport_param(self, key, value): self.vportparams.append({'key': key, 'value': value}) class LibvirtConfigGuestInput(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestInput, self).__init__(root_name="input", **kwargs) self.type = "tablet" self.bus = "usb" def format_dom(self): dev = super(LibvirtConfigGuestInput, self).format_dom() dev.set("type", self.type) dev.set("bus", 
self.bus) return dev class LibvirtConfigGuestGraphics(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestGraphics, self).__init__(root_name="graphics", **kwargs) self.type = "vnc" self.autoport = True self.keymap = None self.listen = None def format_dom(self): dev = super(LibvirtConfigGuestGraphics, self).format_dom() dev.set("type", self.type) if self.autoport: dev.set("autoport", "yes") else: dev.set("autoport", "no") if self.keymap: dev.set("keymap", self.keymap) if self.listen: dev.set("listen", self.listen) return dev class LibvirtConfigSeclabel(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigSeclabel, self).__init__(root_name="seclabel", **kwargs) self.type = 'dynamic' self.baselabel = None def format_dom(self): seclabel = super(LibvirtConfigSeclabel, self).format_dom() seclabel.set('type', self.type) if self.baselabel: seclabel.append(self._text_node("baselabel", self.baselabel)) return seclabel class LibvirtConfigGuestVideo(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestVideo, self).__init__(root_name="video", **kwargs) self.type = 'cirrus' self.vram = None self.heads = None def format_dom(self): dev = super(LibvirtConfigGuestVideo, self).format_dom() model = etree.Element("model") model.set("type", self.type) if self.vram: model.set("vram", str(self.vram)) if self.heads: model.set("heads", str(self.heads)) dev.append(model) return dev class LibvirtConfigGuestController(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestController, self).__init__(root_name="controller", **kwargs) self.type = None self.index = None self.model = None def format_dom(self): controller = super(LibvirtConfigGuestController, self).format_dom() controller.set("type", self.type) if self.index is not None: controller.set("index", str(self.index)) if self.model: controller.set("model", str(self.model)) return controller class LibvirtConfigGuestHostdev(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestHostdev, self).\ __init__(root_name="hostdev", **kwargs) self.mode = kwargs.get('mode') self.type = kwargs.get('type') self.managed = 'yes' def format_dom(self): dev = super(LibvirtConfigGuestHostdev, self).format_dom() dev.set("mode", self.mode) dev.set("type", self.type) dev.set("managed", self.managed) return dev def parse_dom(self, xmldoc): super(LibvirtConfigGuestHostdev, self).parse_dom(xmldoc) self.mode = xmldoc.get('mode') self.type = xmldoc.get('type') self.managed = xmldoc.get('managed') return xmldoc.getchildren() class LibvirtConfigGuestHostdevPCI(LibvirtConfigGuestHostdev): def __init__(self, **kwargs): super(LibvirtConfigGuestHostdevPCI, self).\ __init__(mode='subsystem', type='pci', **kwargs) self.domain = None self.bus = None self.slot = None self.function = None def format_dom(self): dev = super(LibvirtConfigGuestHostdevPCI, self).format_dom() address = etree.Element("address", domain='0x' + self.domain, bus='0x' + self.bus, slot='0x' + self.slot, function='0x' + self.function) source = etree.Element("source") source.append(address) dev.append(source) return dev def parse_dom(self, xmldoc): childs = super(LibvirtConfigGuestHostdevPCI, self).parse_dom(xmldoc) for c in childs: if c.tag == "source": for sub in c.getchildren(): if sub.tag == 'address': self.domain = sub.get('domain') self.bus = sub.get('bus') self.slot = sub.get('slot') self.function = sub.get('function') class LibvirtConfigGuestCharBase(LibvirtConfigGuestDevice): def __init__(self, 
**kwargs): super(LibvirtConfigGuestCharBase, self).__init__(**kwargs) self.type = "pty" self.source_path = None def format_dom(self): dev = super(LibvirtConfigGuestCharBase, self).format_dom() dev.set("type", self.type) if self.type == "file": dev.append(etree.Element("source", path=self.source_path)) elif self.type == "unix": dev.append(etree.Element("source", mode="bind", path=self.source_path)) return dev class LibvirtConfigGuestChar(LibvirtConfigGuestCharBase): def __init__(self, **kwargs): super(LibvirtConfigGuestChar, self).__init__(**kwargs) self.target_port = None def format_dom(self): dev = super(LibvirtConfigGuestChar, self).format_dom() if self.target_port is not None: dev.append(etree.Element("target", port=str(self.target_port))) return dev class LibvirtConfigGuestSerial(LibvirtConfigGuestChar): def __init__(self, **kwargs): super(LibvirtConfigGuestSerial, self).__init__(root_name="serial", **kwargs) class LibvirtConfigGuestConsole(LibvirtConfigGuestChar): def __init__(self, **kwargs): super(LibvirtConfigGuestConsole, self).__init__(root_name="console", **kwargs) class LibvirtConfigGuestChannel(LibvirtConfigGuestCharBase): def __init__(self, **kwargs): super(LibvirtConfigGuestChannel, self).__init__(root_name="channel", **kwargs) self.target_type = "virtio" self.target_name = None def format_dom(self): dev = super(LibvirtConfigGuestChannel, self).format_dom() target = etree.Element("target", type=self.target_type) if self.target_name is not None: target.set("name", self.target_name) dev.append(target) return dev class LibvirtConfigGuestWatchdog(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestWatchdog, self).__init__(root_name="watchdog", **kwargs) self.model = 'i6300esb' self.action = 'reset' def format_dom(self): dev = super(LibvirtConfigGuestWatchdog, self).format_dom() dev.set('model', self.model) dev.set('action', self.action) return dev class LibvirtConfigGuest(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuest, self).__init__(root_name="domain", **kwargs) self.virt_type = None self.uuid = None self.name = None self.memory = 500 * units.Mi self.vcpus = 1 self.cpuset = None self.cpu = None self.cpu_shares = None self.cpu_quota = None self.cpu_period = None self.acpi = False self.apic = False self.clock = None self.sysinfo = None self.os_type = None self.os_loader = None self.os_kernel = None self.os_initrd = None self.os_cmdline = None self.os_root = None self.os_init_path = None self.os_boot_dev = [] self.os_smbios = None self.os_mach_type = None self.devices = [] def _format_basic_props(self, root): root.append(self._text_node("uuid", self.uuid)) root.append(self._text_node("name", self.name)) root.append(self._text_node("memory", self.memory)) if self.cpuset is not None: vcpu = self._text_node("vcpu", self.vcpus) vcpu.set("cpuset", self.cpuset) root.append(vcpu) else: root.append(self._text_node("vcpu", self.vcpus)) def _format_os(self, root): os = etree.Element("os") type_node = self._text_node("type", self.os_type) if self.os_mach_type is not None: type_node.set("machine", self.os_mach_type) os.append(type_node) if self.os_kernel is not None: os.append(self._text_node("kernel", self.os_kernel)) if self.os_loader is not None: os.append(self._text_node("loader", self.os_loader)) if self.os_initrd is not None: os.append(self._text_node("initrd", self.os_initrd)) if self.os_cmdline is not None: os.append(self._text_node("cmdline", self.os_cmdline)) if self.os_root is not None: os.append(self._text_node("root", 
self.os_root)) if self.os_init_path is not None: os.append(self._text_node("init", self.os_init_path)) for boot_dev in self.os_boot_dev: os.append(etree.Element("boot", dev=boot_dev)) if self.os_smbios is not None: os.append(self.os_smbios.format_dom()) root.append(os) def _format_features(self, root): if self.acpi or self.apic: features = etree.Element("features") if self.acpi: features.append(etree.Element("acpi")) if self.apic: features.append(etree.Element("apic")) root.append(features) def _format_cputune(self, root): cputune = etree.Element("cputune") if self.cpu_shares is not None: cputune.append(self._text_node("shares", self.cpu_shares)) if self.cpu_quota is not None: cputune.append(self._text_node("quota", self.cpu_quota)) if self.cpu_period is not None: cputune.append(self._text_node("period", self.cpu_period)) if len(cputune) > 0: root.append(cputune) def _format_devices(self, root): if len(self.devices) == 0: return devices = etree.Element("devices") for dev in self.devices: devices.append(dev.format_dom()) root.append(devices) def format_dom(self): root = super(LibvirtConfigGuest, self).format_dom() root.set("type", self.virt_type) self._format_basic_props(root) if self.sysinfo is not None: root.append(self.sysinfo.format_dom()) self._format_os(root) self._format_features(root) self._format_cputune(root) if self.clock is not None: root.append(self.clock.format_dom()) if self.cpu is not None: root.append(self.cpu.format_dom()) self._format_devices(root) return root def parse_dom(self, xmldoc): # Note: This cover only for: LibvirtConfigGuestDisks # LibvirtConfigGuestHostdevPCI # LibvirtConfigGuestCPU for c in xmldoc.getchildren(): if c.tag == 'devices': for d in c.getchildren(): if d.tag == 'disk': obj = LibvirtConfigGuestDisk() obj.parse_dom(d) self.devices.append(obj) elif d.tag == 'hostdev' and d.get('type') == 'pci': obj = LibvirtConfigGuestHostdevPCI() obj.parse_dom(d) self.devices.append(obj) elif c.tag == 'cpu': obj = LibvirtConfigGuestCPU() obj.parse_dom(c) self.cpu = obj def add_device(self, dev): self.devices.append(dev) def set_clock(self, clk): self.clock = clk class LibvirtConfigGuestSnapshot(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigGuestSnapshot, self).__init__( root_name="domainsnapshot", **kwargs) self.name = None self.disks = [] def format_dom(self): ss = super(LibvirtConfigGuestSnapshot, self).format_dom() if self.name: ss.append(self._text_node("name", self.name)) disks = etree.Element('disks') for disk in self.disks: disks.append(disk.format_dom()) ss.append(disks) return ss def add_disk(self, disk): self.disks.append(disk) class LibvirtConfigNodeDevice(LibvirtConfigObject): """Libvirt Node Devices parser""" def __init__(self, **kwargs): super(LibvirtConfigNodeDevice, self).__init__(root_name="device", **kwargs) self.name = None self.parent = None self.driver = None self.pci_capability = None def parse_dom(self, xmldoc): super(LibvirtConfigNodeDevice, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "name": self.name = c.text elif c.tag == "parent": self.parent = c.text elif c.tag == "capability" and c.get("type") == 'pci': pcicap = LibvirtConfigNodeDevicePciCap() pcicap.parse_dom(c) self.pci_capability = pcicap class LibvirtConfigNodeDevicePciCap(LibvirtConfigObject): """Libvirt Node Devices pci capability parser""" def __init__(self, **kwargs): super(LibvirtConfigNodeDevicePciCap, self).__init__( root_name="capability", **kwargs) self.domain = None self.bus = None self.slot = None self.function = None 
self.product = None self.product_id = None self.vendor = None self.vendor_id = None self.fun_capability = list() def parse_dom(self, xmldoc): super(LibvirtConfigNodeDevicePciCap, self).parse_dom(xmldoc) for c in xmldoc.getchildren(): if c.tag == "domain": self.domain = int(c.text) elif c.tag == "slot": self.slot = int(c.text) elif c.tag == "bus": self.bus = int(c.text) elif c.tag == "function": self.function = int(c.text) elif c.tag == "product": self.product = c.text self.product_id = c.get('id') elif c.tag == "vendor": self.vendor = c.text self.vendor_id = c.get('id') elif c.tag == "capability" and c.get('type') in \ ('virt_functions', 'phys_function'): funcap = LibvirtConfigNodeDevicePciSubFunctionCap() funcap.parse_dom(c) self.fun_capability.append(funcap) class LibvirtConfigNodeDevicePciSubFunctionCap(LibvirtConfigObject): def __init__(self, **kwargs): super(LibvirtConfigNodeDevicePciSubFunctionCap, self).__init__( root_name="capability", **kwargs) self.type = None self.device_addrs = list() # list of tuple (domain,bus,slot,function) def parse_dom(self, xmldoc): super(LibvirtConfigNodeDevicePciSubFunctionCap, self).parse_dom(xmldoc) self.type = xmldoc.get("type") for c in xmldoc.getchildren(): if c.tag == "address": self.device_addrs.append((c.get('domain'), c.get('bus'), c.get('slot'), c.get('function'))) class LibvirtConfigGuestRng(LibvirtConfigGuestDevice): def __init__(self, **kwargs): super(LibvirtConfigGuestRng, self).__init__(root_name="rng", **kwargs) self.model = 'random' self.backend = None self.rate_period = None self.rate_bytes = None def format_dom(self): dev = super(LibvirtConfigGuestRng, self).format_dom() dev.set('model', 'virtio') backend = etree.Element("backend") backend.set("model", self.model) backend.text = self.backend if self.rate_period and self.rate_bytes: rate = etree.Element("rate") rate.set("period", str(self.rate_period)) rate.set("bytes", str(self.rate_bytes)) dev.append(rate) dev.append(backend) return dev nova-2014.1.5/nova/virt/libvirt/dmcrypt.py0000664000567000056700000000371212540642544021516 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from nova.virt.libvirt import utils _dmcrypt_suffix = '-dmcrypt' def volume_name(base): """Returns the suffixed dmcrypt volume name. 
This is to avoid collisions with similarly named device mapper names for LVM volumes """ return base + _dmcrypt_suffix def create_volume(target, device, cipher, key_size, key): """Sets up a dmcrypt mapping :param target: device mapper logical device name :param device: underlying block device :param cipher: encryption cipher string digestible by cryptsetup :param key_size: encryption key size :param key: encryption key as an array of unsigned bytes """ cmd = ('cryptsetup', 'create', target, device, '--cipher=' + cipher, '--key-size=' + str(key_size), '--key-file=-') key = ''.join(map(lambda byte: "%02x" % byte, key)) utils.execute(*cmd, process_input=key, run_as_root=True) def delete_volume(target): """Deletes a dmcrypt mapping :param target: name of the mapped logical device """ utils.execute('cryptsetup', 'remove', target, run_as_root=True) def list_volumes(): """Enumerate the dmcrypt volumes present in /dev/mapper.""" return [dmdev for dmdev in os.listdir('/dev/mapper') if dmdev.endswith('-dmcrypt')] nova-2014.1.5/nova/virt/libvirt/blockinfo.py0000664000567000056700000005221712540642544022006 0ustar jenkinsjenkins00000000000000# Copyright (C) 2012-2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Handling of block device information and mapping. This module contains helper methods for interpreting the block device information and determining the suitable mapping to guest devices and libvirt XML. Throughout these methods there are a number of standard variables / types used * 'mapping': a dict containing the storage device mapping.
For the default disk types it will contain the following keys & values: 'disk' -> disk_info 'disk.rescue' -> disk_info 'disk.local' -> disk_info 'disk.swap' -> disk_info 'disk.config' -> disk_info If any of the default disks are overridden by the block device info mappings, the hash value will be None. For any ephemeral device there will also be a dict entry 'disk.eph$NUM' -> disk_info For any volume device there will also be a dict entry: $path -> disk_info Finally a special key will refer to the root device: 'root' -> disk_info * 'disk_info': a tuple specifying disk configuration. It contains the following 3 fields (disk bus, disk dev, device type) and possibly these optional fields: ('format',) * 'disk_bus': the guest bus type ('ide', 'virtio', 'scsi', etc) * 'disk_dev': the device name 'vda', 'hdc', 'sdf', 'xvde' etc * 'device_type': type of device eg 'disk', 'cdrom', 'floppy' * 'format': Which format to apply to the device if applicable * 'boot_index': Number designating the boot order of the device """ import itertools import operator from oslo.config import cfg from nova import block_device from nova.compute import flavors from nova import exception from nova.objects import block_device as block_device_obj from nova.openstack.common.gettextutils import _ from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt import driver from nova.virt.libvirt import utils as libvirt_utils CONF = cfg.CONF SUPPORTED_DEVICE_TYPES = ('disk', 'cdrom', 'floppy', 'lun') BOOT_DEV_FOR_TYPE = {'disk': 'hd', 'cdrom': 'cdrom', 'floppy': 'fd'} def has_disk_dev(mapping, disk_dev): """Determine if a disk device name has already been used. Looks at all the keys in mapping to see if any corresponding disk_info tuple has a device name matching disk_dev. Returns True if the disk_dev is in use. """ for disk in mapping: info = mapping[disk] if info['dev'] == disk_dev: return True return False def get_dev_prefix_for_disk_bus(disk_bus): """Determine the dev prefix for a disk bus. Determine the dev prefix to be combined with a disk number to form a disk_dev. eg 'hd' for 'ide' bus can be used to form a disk dev 'hda' Returns the dev prefix or raises an exception if the disk bus is unknown. """ if CONF.libvirt.disk_prefix: return CONF.libvirt.disk_prefix if disk_bus == "ide": return "hd" elif disk_bus == "virtio": return "vd" elif disk_bus == "xen": # Two possible mappings for Xen, xvda or sda # which are interchangeable, so we pick sda return "sd" elif disk_bus == "scsi": return "sd" elif disk_bus == "usb": return "sd" elif disk_bus == "fdc": return "fd" elif disk_bus == "uml": return "ubd" elif disk_bus == "lxc": return None else: raise exception.NovaException( _("Unable to determine disk prefix for %s") % disk_bus) def get_dev_count_for_disk_bus(disk_bus): """Determine the number of disks supported. Determine how many disks can be supported in a single VM for a particular disk bus. Returns the number of disks supported. """ if disk_bus == "ide": return 4 else: return 26 def find_disk_dev_for_disk_bus(mapping, bus, last_device=False): """Identify a free disk dev name for a bus. Determines the possible disk dev names for the bus, and then checks them in order until it identifies one that is not yet used in the disk mapping. If 'last_device' is set, it will only consider the last available disk dev name. Returns the chosen disk_dev name, or raises an exception if none is available.
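For example (illustrative), on the 'virtio' bus with 'vda' already present in the mapping, the name chosen is 'vdb'; with last_device=True only the final candidate ('vdz', given the 26-device limit) is considered.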
""" dev_prefix = get_dev_prefix_for_disk_bus(bus) if dev_prefix is None: return None max_dev = get_dev_count_for_disk_bus(bus) if last_device: devs = [max_dev - 1] else: devs = range(max_dev) for idx in devs: disk_dev = dev_prefix + chr(ord('a') + idx) if not has_disk_dev(mapping, disk_dev): return disk_dev raise exception.NovaException( _("No free disk device names for prefix '%s'"), dev_prefix) def is_disk_bus_valid_for_virt(virt_type, disk_bus): valid_bus = { 'qemu': ['virtio', 'scsi', 'ide', 'usb', 'fdc'], 'kvm': ['virtio', 'scsi', 'ide', 'usb', 'fdc'], 'xen': ['xen', 'ide'], 'uml': ['uml'], 'lxc': ['lxc'], } if virt_type not in valid_bus: raise exception.UnsupportedVirtType(virt=virt_type) return disk_bus in valid_bus[virt_type] def get_disk_bus_for_device_type(virt_type, image_meta=None, device_type="disk"): """Determine the best disk bus to use for a device type. Considering the currently configured virtualization type, return the optimal disk_bus to use for a given device type. For example, for a disk on KVM it will return 'virtio', while for a CDROM it will return 'ide' on x86_64 and 'scsi' on ppc64. Returns the disk_bus, or returns None if the device type is not supported for this virtualization """ # Prefer a disk bus set against the image first of all if image_meta: key = "hw_" + device_type + "_bus" disk_bus = image_meta.get('properties', {}).get(key) if disk_bus is not None: if not is_disk_bus_valid_for_virt(virt_type, disk_bus): raise exception.UnsupportedHardware(model=disk_bus, virt=virt_type) return disk_bus # Otherwise pick a hypervisor default disk bus if virt_type == "uml": if device_type == "disk": return "uml" elif virt_type == "lxc": return "lxc" elif virt_type == "xen": if device_type == "cdrom": return "ide" elif device_type == "disk": return "xen" elif virt_type in ("qemu", "kvm"): if device_type == "cdrom": arch = libvirt_utils.get_arch(image_meta) if arch in ("ppc", "ppc64"): return "scsi" else: return "ide" elif device_type == "disk": return "virtio" elif device_type == "floppy": return "fdc" return None def get_disk_bus_for_disk_dev(virt_type, disk_dev): """Determine the disk bus for a disk device. Given a disk device like 'hda', 'sdf', 'xvdb', etc guess what the most appropriate disk bus is for the currently configured virtualization technology Returns the disk bus, or raises an Exception if the disk device prefix is unknown. """ if disk_dev[:2] == 'hd': return "ide" elif disk_dev[:2] == 'sd': # Reverse mapping 'sd' is not reliable # there are many possible mappings. So # this picks the most likely mappings if virt_type == "xen": return "xen" else: return "scsi" elif disk_dev[:2] == 'vd': return "virtio" elif disk_dev[:2] == 'fd': return "fdc" elif disk_dev[:3] == 'xvd': return "xen" elif disk_dev[:3] == 'ubd': return "uml" else: raise exception.NovaException( _("Unable to determine disk bus for '%s'") % disk_dev[:1]) def get_next_disk_info(mapping, disk_bus, device_type='disk', last_device=False, boot_index=None): """Determine the disk info for the next device on disk_bus. Considering the disks already listed in the disk mapping, determine the next available disk dev that can be assigned for the disk bus. Returns the disk_info for the next available disk. 
""" disk_dev = find_disk_dev_for_disk_bus(mapping, disk_bus, last_device) info = {'bus': disk_bus, 'dev': disk_dev, 'type': device_type} if boot_index is not None and boot_index >= 0: info['boot_index'] = str(boot_index) return info def get_eph_disk(index): return 'disk.eph' + str(index) def get_config_drive_type(): """Determine the type of config drive. If config_drive_format is set to iso9660 then the config drive will be 'cdrom', otherwise 'disk'. Returns a string indicating the config drive type. """ if CONF.config_drive_format == 'iso9660': config_drive_type = 'cdrom' elif CONF.config_drive_format == 'vfat': config_drive_type = 'disk' else: raise exception.ConfigDriveUnknownFormat( format=CONF.config_drive_format) return config_drive_type def get_info_from_bdm(virt_type, bdm, mapping={}, disk_bus=None, dev_type=None, allowed_types=None, assigned_devices=None): allowed_types = allowed_types or SUPPORTED_DEVICE_TYPES device_name = block_device.strip_dev(get_device_name(bdm)) bdm_type = bdm.get('device_type') or dev_type if bdm_type not in allowed_types: bdm_type = 'disk' bdm_bus = bdm.get('disk_bus') or disk_bus if not is_disk_bus_valid_for_virt(virt_type, bdm_bus): if device_name: bdm_bus = get_disk_bus_for_disk_dev(virt_type, device_name) else: bdm_bus = get_disk_bus_for_device_type(virt_type, None, bdm_type) if not device_name: if assigned_devices: padded_mapping = dict((dev, {'dev': dev}) for dev in assigned_devices) padded_mapping.update(mapping) else: padded_mapping = mapping device_name = find_disk_dev_for_disk_bus(padded_mapping, bdm_bus) bdm_info = {'bus': bdm_bus, 'dev': device_name, 'type': bdm_type} bdm_format = bdm.get('guest_format') if bdm_format: bdm_info.update({'format': bdm_format}) boot_index = bdm.get('boot_index') if boot_index is not None and boot_index >= 0: # NOTE(ndipanov): libvirt starts ordering from 1, not 0 bdm_info['boot_index'] = str(boot_index + 1) return bdm_info def get_device_name(bdm): """Get the device name if present regardless of the bdm format.""" if isinstance(bdm, block_device_obj.BlockDeviceMapping): return bdm.device_name else: return bdm.get('device_name') or bdm.get('mount_device') def get_root_info(virt_type, image_meta, root_bdm, disk_bus, cdrom_bus, root_device_name=None): # NOTE (ndipanov): This is a hack to avoid considering an image # BDM with local target, as we don't support them # yet. 
Only applies when passed non-driver format no_root_bdm = (not root_bdm or ( root_bdm.get('source_type') == 'image' and root_bdm.get('destination_type') == 'local')) if no_root_bdm: if (image_meta and image_meta.get('disk_format') == 'iso'): root_device_bus = cdrom_bus root_device_type = 'cdrom' else: root_device_bus = disk_bus root_device_type = 'disk' if root_device_name: root_device_bus = get_disk_bus_for_disk_dev(virt_type, root_device_name) else: root_device_name = find_disk_dev_for_disk_bus({}, root_device_bus) return {'bus': root_device_bus, 'type': root_device_type, 'dev': block_device.strip_dev(root_device_name), 'boot_index': '1'} else: if not get_device_name(root_bdm) and root_device_name: root_bdm = root_bdm.copy() root_bdm['device_name'] = root_device_name return get_info_from_bdm(virt_type, root_bdm, {}, disk_bus) def default_device_names(virt_type, context, instance, root_device_name, ephemerals, swap, block_device_mapping): block_device_info = { 'root_device_name': root_device_name, 'swap': driver_block_device.get_swap( driver_block_device.convert_swap(swap)), 'ephemerals': driver_block_device.convert_ephemerals(ephemerals), 'block_device_mapping': ( driver_block_device.convert_volumes( block_device_mapping) + driver_block_device.convert_snapshots( block_device_mapping)) } get_disk_info(virt_type, instance, block_device_info) for driver_bdm in itertools.chain(block_device_info['ephemerals'], [block_device_info['swap']] if block_device_info['swap'] else [], block_device_info['block_device_mapping']): driver_bdm.save(context) def has_default_ephemeral(instance, disk_bus, block_device_info, mapping): ephemerals = driver.block_device_info_get_ephemerals(block_device_info) if instance['ephemeral_gb'] <= 0 or ephemerals: return None else: info = get_next_disk_info(mapping, disk_bus) if block_device.volume_in_mapping(info['dev'], block_device_info): return None return info def update_bdm(bdm, info): device_name_field = ('device_name' if 'device_name' in bdm else 'mount_device') # Do not update the device name if it was already present bdm.update(dict(zip((device_name_field, 'disk_bus', 'device_type'), ((bdm.get(device_name_field) or block_device.prepend_dev(info['dev'])), info['bus'], info['type'])))) def get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, block_device_info=None, image_meta=None, rescue=False): """Determine how to map default disks to the virtual machine. This is about figuring out whether the default 'disk', 'disk.local', 'disk.swap' and 'disk.config' images have been overridden by the block device mapping. Returns the guest disk mapping for the devices. """ inst_type = flavors.extract_flavor(instance) mapping = {} pre_assigned_device_names = \ [block_device.strip_dev(get_device_name(bdm)) for bdm in itertools.chain( driver.block_device_info_get_ephemerals(block_device_info), [driver.block_device_info_get_swap(block_device_info)], driver.block_device_info_get_mapping(block_device_info)) if get_device_name(bdm)] if virt_type == "lxc": # NOTE(zul): This information is not used by the libvirt driver # however we need to populate mapping so the image can be # created when the instance is started. This can # be removed when we convert LXC to use block devices. 
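# A sketch of the result for LXC (assuming the default 'lxc' bus, for which # no device name can be assigned): both 'root' and 'disk' refer to the same # dict, e.g. {'bus': 'lxc', 'dev': None, 'type': 'disk', 'boot_index': '1'}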
root_disk_bus = disk_bus root_device_type = 'disk' root_info = get_next_disk_info(mapping, root_disk_bus, root_device_type, boot_index=1) mapping['root'] = root_info mapping['disk'] = root_info return mapping if rescue: rescue_info = get_next_disk_info(mapping, disk_bus, boot_index=1) mapping['disk.rescue'] = rescue_info mapping['root'] = rescue_info os_info = get_next_disk_info(mapping, disk_bus) mapping['disk'] = os_info return mapping # NOTE (ndipanov): root_bdm can be None when we boot from image # as there is no driver representation of local targeted images # and they will not be in block_device_info list. root_bdm = block_device.get_root_bdm( driver.block_device_info_get_mapping(block_device_info)) root_device_name = block_device.strip_dev( driver.block_device_info_get_root(block_device_info)) root_info = get_root_info(virt_type, image_meta, root_bdm, disk_bus, cdrom_bus, root_device_name) mapping['root'] = root_info # NOTE (ndipanov): This implicitly relies on image->local BDMs not # being considered in the driver layer - so missing # bdm with boot_index 0 means - use image, unless it was # overridden. This can happen when using legacy syntax and # no root_device_name is set on the instance. if not root_bdm and not block_device.volume_in_mapping(root_info['dev'], block_device_info): mapping['disk'] = root_info default_eph = has_default_ephemeral(instance, disk_bus, block_device_info, mapping) if default_eph: mapping['disk.local'] = default_eph for idx, eph in enumerate(driver.block_device_info_get_ephemerals( block_device_info)): eph_info = get_info_from_bdm( virt_type, eph, mapping, disk_bus, assigned_devices=pre_assigned_device_names) mapping[get_eph_disk(idx)] = eph_info update_bdm(eph, eph_info) swap = driver.block_device_info_get_swap(block_device_info) if swap and swap.get('swap_size', 0) > 0: swap_info = get_info_from_bdm(virt_type, swap, mapping, disk_bus) mapping['disk.swap'] = swap_info update_bdm(swap, swap_info) elif inst_type['swap'] > 0: swap_info = get_next_disk_info(mapping, disk_bus) if not block_device.volume_in_mapping(swap_info['dev'], block_device_info): mapping['disk.swap'] = swap_info block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: vol_info = get_info_from_bdm( virt_type, vol, mapping, assigned_devices=pre_assigned_device_names) mapping[block_device.prepend_dev(vol_info['dev'])] = vol_info update_bdm(vol, vol_info) if configdrive.required_by(instance): device_type = get_config_drive_type() disk_bus = get_disk_bus_for_device_type(virt_type, image_meta, device_type) config_info = get_next_disk_info(mapping, disk_bus, device_type, last_device=True) mapping['disk.config'] = config_info return mapping def get_disk_info(virt_type, instance, block_device_info=None, image_meta=None, rescue=False): """Determine guest disk mapping info. This is a wrapper around get_disk_mapping, which also returns the chosen disk_bus and cdrom_bus. The returned data is in a dict - disk_bus: the bus for hard disks - cdrom_bus: the bus for CDROMs - mapping: the disk mapping Returns the disk mapping dict.
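For example (an illustrative result, not from the original docstring), a KVM guest booted from an image with no extra block devices would typically yield something like: {'disk_bus': 'virtio', 'cdrom_bus': 'ide', 'mapping': {'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk', 'boot_index': '1'}, 'disk': <same dict as 'root'>, ...}}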
""" disk_bus = get_disk_bus_for_device_type(virt_type, image_meta, "disk") cdrom_bus = get_disk_bus_for_device_type(virt_type, image_meta, "cdrom") mapping = get_disk_mapping(virt_type, instance, disk_bus, cdrom_bus, block_device_info, image_meta, rescue) return {'disk_bus': disk_bus, 'cdrom_bus': cdrom_bus, 'mapping': mapping} def get_boot_order(disk_info): boot_mapping = (info for name, info in disk_info['mapping'].iteritems() if name != 'root' and info.get('boot_index') is not None) boot_devs_dup = (BOOT_DEV_FOR_TYPE[dev['type']] for dev in sorted(boot_mapping, key=operator.itemgetter('boot_index'))) def uniq(lst): s = set() return [el for el in lst if el not in s and not s.add(el)] return uniq(boot_devs_dup) nova-2014.1.5/nova/virt/libvirt/driver.py0000664000567000056700000070303512540642544021334 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright (c) 2011 Piston Cloud Computing, Inc # Copyright (c) 2012 University Of Minho # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A connection to a hypervisor through libvirt. Supports KVM, LXC, QEMU, UML, and XEN. **Related Flags** :driver_type: Libvirt domain type. Can be kvm, qemu, uml, xen (default: kvm). :connection_uri: Override for the default libvirt URI (depends on driver_type). :disk_prefix: Override the default disk prefix for the devices attached to a server. :rescue_image_id: Rescue ami image (None = original image). :rescue_kernel_id: Rescue aki image (None = original image). :rescue_ramdisk_id: Rescue ari image (None = original image). 
:injected_network_template: Template file for injected network :allow_same_net_traffic: Whether to allow in project network traffic """ import errno import eventlet import functools import glob import mmap import os import shutil import socket import sys import tempfile import threading import time import uuid from eventlet import greenio from eventlet import greenthread from eventlet import patcher from eventlet import tpool from eventlet import util as eventlet_util from lxml import etree from oslo.config import cfg from nova.api.metadata import base as instance_metadata from nova import block_device from nova.compute import flavors from nova.compute import power_state from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_mode from nova import context as nova_context from nova import exception from nova.image import glance from nova.objects import block_device as block_device_obj from nova.objects import flavor as flavor_obj from nova.objects import instance as instance_obj from nova.objects import service as service_obj from nova.openstack.common import excutils from nova.openstack.common import fileutils from nova.openstack.common.gettextutils import _ from nova.openstack.common.gettextutils import _LW from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import processutils from nova.openstack.common import units from nova.openstack.common import xmlutils from nova.pci import pci_manager from nova.pci import pci_utils from nova.pci import pci_whitelist from nova import rpc from nova import utils from nova import version from nova.virt import block_device as driver_block_device from nova.virt import configdrive from nova.virt import cpu from nova.virt.disk import api as disk from nova.virt import driver from nova.virt import event as virtevent from nova.virt import firewall from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import firewall as libvirt_firewall from nova.virt.libvirt import imagebackend from nova.virt.libvirt import imagecache from nova.virt.libvirt import utils as libvirt_utils from nova.virt import netutils from nova.virt import watchdog_actions from nova import volume from nova.volume import encryptors native_threading = patcher.original("threading") native_Queue = patcher.original("Queue") libvirt = None LOG = logging.getLogger(__name__) libvirt_opts = [ cfg.StrOpt('rescue_image_id', help='Rescue ami image', deprecated_group='DEFAULT'), cfg.StrOpt('rescue_kernel_id', help='Rescue aki image', deprecated_group='DEFAULT'), cfg.StrOpt('rescue_ramdisk_id', help='Rescue ari image', deprecated_group='DEFAULT'), cfg.StrOpt('virt_type', default='kvm', help='Libvirt domain type (valid options are: ' 'kvm, lxc, qemu, uml, xen)', deprecated_group='DEFAULT', deprecated_name='libvirt_type'), cfg.StrOpt('connection_uri', default='', help='Override the default libvirt URI ' '(which is dependent on virt_type)', deprecated_group='DEFAULT', deprecated_name='libvirt_uri'), cfg.BoolOpt('inject_password', default=False, help='Inject the admin password at boot time, ' 'without an agent.', deprecated_name='libvirt_inject_password', deprecated_group='DEFAULT'), cfg.BoolOpt('inject_key', default=False, help='Inject the ssh public key at boot time', deprecated_name='libvirt_inject_key', deprecated_group='DEFAULT'), 
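# NOTE: for reference, an illustrative nova.conf fragment using the options # above (values are examples only, not recommended defaults): # [libvirt] # virt_type = kvm # inject_password = false # inject_key = false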
cfg.IntOpt('inject_partition', default=-2, help='The partition to inject to : ' '-2 => disable, -1 => inspect (libguestfs only), ' '0 => not partitioned, >0 => partition number', deprecated_name='libvirt_inject_partition', deprecated_group='DEFAULT'), cfg.BoolOpt('use_usb_tablet', default=True, help='Sync virtual and real mouse cursors in Windows VMs', deprecated_group='DEFAULT'), cfg.StrOpt('live_migration_uri', default="qemu+tcp://%s/system", help='Migration target URI ' '(any included "%s" is replaced with ' 'the migration target hostname)', deprecated_group='DEFAULT'), cfg.StrOpt('live_migration_flag', default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER', help='Migration flags to be set for live migration', deprecated_group='DEFAULT'), cfg.StrOpt('block_migration_flag', default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, ' 'VIR_MIGRATE_NON_SHARED_INC', help='Migration flags to be set for block migration', deprecated_group='DEFAULT'), cfg.IntOpt('live_migration_bandwidth', default=0, help='Maximum bandwidth to be used during migration, in Mbps', deprecated_group='DEFAULT'), cfg.StrOpt('snapshot_image_format', help='Snapshot image format (valid options are : ' 'raw, qcow2, vmdk, vdi). ' 'Defaults to same as source image', deprecated_group='DEFAULT'), cfg.StrOpt('vif_driver', default='nova.virt.libvirt.vif.LibvirtGenericVIFDriver', help='DEPRECATED. The libvirt VIF driver to configure the VIFs.' 'This option is deprecated and will be removed in the ' 'Juno release.', deprecated_name='libvirt_vif_driver', deprecated_group='DEFAULT'), cfg.ListOpt('volume_drivers', default=[ 'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver', 'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver', 'local=nova.virt.libvirt.volume.LibvirtVolumeDriver', 'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver', 'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver', 'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver', 'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver', 'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver', 'glusterfs=' 'nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver', 'fibre_channel=nova.virt.libvirt.volume.' 'LibvirtFibreChannelVolumeDriver', 'scality=' 'nova.virt.libvirt.volume.LibvirtScalityVolumeDriver', ], help='Libvirt handlers for remote volumes.', deprecated_name='libvirt_volume_drivers', deprecated_group='DEFAULT'), cfg.StrOpt('disk_prefix', help='Override the default disk prefix for the devices attached' ' to a server, which is dependent on virt_type. ' '(valid options are: sd, xvd, uvd, vd)', deprecated_name='libvirt_disk_prefix', deprecated_group='DEFAULT'), cfg.IntOpt('wait_soft_reboot_seconds', default=120, help='Number of seconds to wait for instance to shut down after' ' soft reboot request is made. We fall back to hard reboot' ' if instance does not shutdown within this window.', deprecated_name='libvirt_wait_soft_reboot_seconds', deprecated_group='DEFAULT'), cfg.StrOpt('cpu_mode', help='Set to "host-model" to clone the host CPU feature flags; ' 'to "host-passthrough" to use the host CPU model exactly; ' 'to "custom" to use a named CPU model; ' 'to "none" to not set any CPU model. ' 'If virt_type="kvm|qemu", it will default to ' '"host-model", otherwise it will default to "none"', deprecated_name='libvirt_cpu_mode', deprecated_group='DEFAULT'), cfg.StrOpt('cpu_model', help='Set to a named libvirt CPU model (see names listed ' 'in /usr/share/libvirt/cpu_map.xml). 
Only has effect if ' 'cpu_mode="custom" and virt_type="kvm|qemu"', deprecated_name='libvirt_cpu_model', deprecated_group='DEFAULT'), cfg.StrOpt('snapshots_directory', default='$instances_path/snapshots', help='Location where libvirt driver will store snapshots ' 'before uploading them to image service', deprecated_name='libvirt_snapshots_directory', deprecated_group='DEFAULT'), cfg.StrOpt('xen_hvmloader_path', default='/usr/lib/xen/boot/hvmloader', help='Location where the Xen hvmloader is kept', deprecated_group='DEFAULT'), cfg.ListOpt('disk_cachemodes', default=[], help='Specific cachemodes to use for different disk types ' 'e.g: file=directsync,block=none', deprecated_group='DEFAULT'), cfg.StrOpt('rng_dev_path', help='A path to a device that will be used as source of ' 'entropy on the host. Permitted options are: ' '/dev/random or /dev/hwrng'), ] CONF = cfg.CONF CONF.register_opts(libvirt_opts, 'libvirt') CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('default_ephemeral_format', 'nova.virt.driver') CONF.import_opt('use_cow_images', 'nova.virt.driver') CONF.import_opt('live_migration_retry_count', 'nova.compute.manager') CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc') CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice') CONF.import_opt('vcpu_pin_set', 'nova.virt.cpu') CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver') CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver') DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( libvirt_firewall.__name__, libvirt_firewall.IptablesFirewallDriver.__name__) MAX_CONSOLE_BYTES = 100 * units.Ki # The libvirt driver will prefix any disable reason codes with this string. DISABLE_PREFIX = 'AUTO: ' # Disable reason for the service which was enabled or disabled without reason DISABLE_REASON_UNDEFINED = 'None' def patch_tpool_proxy(): """eventlet.tpool.Proxy doesn't work with old-style class in __str__() or __repr__() calls. See bug #962840 for details. We perform a monkey patch to replace those two instance methods. """ def str_method(self): return str(self._obj) def repr_method(self): return repr(self._obj) tpool.Proxy.__str__ = str_method tpool.Proxy.__repr__ = repr_method patch_tpool_proxy() VIR_DOMAIN_NOSTATE = 0 VIR_DOMAIN_RUNNING = 1 VIR_DOMAIN_BLOCKED = 2 VIR_DOMAIN_PAUSED = 3 VIR_DOMAIN_SHUTDOWN = 4 VIR_DOMAIN_SHUTOFF = 5 VIR_DOMAIN_CRASHED = 6 VIR_DOMAIN_PMSUSPENDED = 7 LIBVIRT_POWER_STATE = { VIR_DOMAIN_NOSTATE: power_state.NOSTATE, VIR_DOMAIN_RUNNING: power_state.RUNNING, # NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen. # It means that the VM is running and the vCPU is idle. So, # we map it to RUNNING VIR_DOMAIN_BLOCKED: power_state.RUNNING, VIR_DOMAIN_PAUSED: power_state.PAUSED, # NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN # means the domain is being shut down. So technically the domain # is still running. SHUTOFF is the real powered off state. # But we will map both to SHUTDOWN anyway. 
# http://libvirt.org/html/libvirt-libvirt.html VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN, VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN, VIR_DOMAIN_CRASHED: power_state.CRASHED, VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED, } MIN_LIBVIRT_VERSION = (0, 9, 6) # When the above version matches/exceeds this version # delete it & corresponding code using it MIN_LIBVIRT_HOST_CPU_VERSION = (0, 9, 10) MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1) # Live snapshot requirements REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU" MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0) MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0) # block size tuning requirements MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2) # BlockJobInfo management requirement MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1) def libvirt_error_handler(context, err): # Just ignore instead of default outputting to stderr. pass class LibvirtDriver(driver.ComputeDriver): capabilities = { "has_imagecache": True, "supports_recreate": True, } def __init__(self, virtapi, read_only=False): super(LibvirtDriver, self).__init__(virtapi) global libvirt if libvirt is None: libvirt = __import__('libvirt') self._host_state = None self._initiator = None self._fc_wwnns = None self._fc_wwpns = None self._wrapped_conn = None self._wrapped_conn_lock = threading.Lock() self._caps = None self._vcpu_total = 0 self.read_only = read_only self.firewall_driver = firewall.load_driver( DEFAULT_FIREWALL_DRIVER, self.virtapi, get_connection=self._get_connection) vif_class = importutils.import_class(CONF.libvirt.vif_driver) self.vif_driver = vif_class(self._get_connection) self.volume_drivers = driver.driver_dict_from_config( CONF.libvirt.volume_drivers, self) self.dev_filter = pci_whitelist.get_pci_devices_filter() self._event_queue = None self._disk_cachemode = None self.image_cache_manager = imagecache.ImageCacheManager() self.image_backend = imagebackend.Backend(CONF.use_cow_images) self.disk_cachemodes = {} self.valid_cachemodes = ["default", "none", "writethrough", "writeback", "directsync", "unsafe", ] for mode_str in CONF.libvirt.disk_cachemodes: disk_type, sep, cache_mode = mode_str.partition('=') if cache_mode not in self.valid_cachemodes: LOG.warn(_('Invalid cachemode %(cache_mode)s specified ' 'for disk type %(disk_type)s.'), {'cache_mode': cache_mode, 'disk_type': disk_type}) continue self.disk_cachemodes[disk_type] = cache_mode self._volume_api = volume.API() @property def disk_cachemode(self): if self._disk_cachemode is None: # We prefer 'none' for consistent performance, host crash # safety & migration correctness by avoiding host page cache. # Some filesystems (eg GlusterFS via FUSE) don't support # O_DIRECT though. For those we fallback to 'writethrough' # which gives host crash safety, and is safe for migration # provided the filesystem is cache coherent (cluster filesystems # typically are, but things like NFS are not). 
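# NOTE: the default computed here can be overridden per disk type via # the [libvirt] disk_cachemodes option, e.g. # disk_cachemodes = file=writethrough,block=none (see set_cache_mode # below).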
self._disk_cachemode = "none" if not self._supports_direct_io(CONF.instances_path): self._disk_cachemode = "writethrough" return self._disk_cachemode @property def host_state(self): if not self._host_state: self._host_state = HostState(self) return self._host_state def set_cache_mode(self, conf): """Set cache mode on LibvirtConfigGuestDisk object.""" try: source_type = conf.source_type driver_cache = conf.driver_cache except AttributeError: return cache_mode = self.disk_cachemodes.get(source_type, driver_cache) conf.driver_cache = cache_mode @staticmethod def _has_min_version(conn, lv_ver=None, hv_ver=None, hv_type=None): try: if lv_ver is not None: libvirt_version = conn.getLibVersion() if libvirt_version < utils.convert_version_to_int(lv_ver): return False if hv_ver is not None: hypervisor_version = conn.getVersion() if hypervisor_version < utils.convert_version_to_int(hv_ver): return False if hv_type is not None: hypervisor_type = conn.getType() if hypervisor_type != hv_type: return False return True except Exception: return False def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None): return self._has_min_version(self._conn, lv_ver, hv_ver, hv_type) def _native_thread(self): """Receives async events coming in from libvirtd. This is a native thread which runs the default libvirt event loop implementation. This processes any incoming async events from libvirtd and queues them for later dispatch. This thread is only permitted to use libvirt python APIs, and the driver.queue_event method. In particular any use of logging is forbidden, since it will confuse eventlet's greenthread integration """ while True: libvirt.virEventRunDefaultImpl() def _dispatch_thread(self): """Dispatches async events coming in from libvirtd. This is a green thread which waits for events to arrive from the libvirt event loop thread. This then dispatches the events to the compute manager. """ while True: self._dispatch_events() @staticmethod def _event_lifecycle_callback(conn, dom, event, detail, opaque): """Receives lifecycle events from libvirt. NB: this method is executing in a native thread, not an eventlet coroutine. It can only invoke other libvirt APIs, or use self.queue_event(). Any use of logging APIs in particular is forbidden. """ self = opaque uuid = dom.UUIDString() transition = None if event == libvirt.VIR_DOMAIN_EVENT_STOPPED: transition = virtevent.EVENT_LIFECYCLE_STOPPED elif event == libvirt.VIR_DOMAIN_EVENT_STARTED: transition = virtevent.EVENT_LIFECYCLE_STARTED elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED: transition = virtevent.EVENT_LIFECYCLE_PAUSED elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED: transition = virtevent.EVENT_LIFECYCLE_RESUMED if transition is not None: self._queue_event(virtevent.LifecycleEvent(uuid, transition)) def _queue_event(self, event): """Puts an event on the queue for dispatch. This method is called by the native event thread to put events on the queue for later dispatch by the green thread. Any use of logging APIs is forbidden. """ if self._event_queue is None: return # Queue the event... self._event_queue.put(event) # ...then wakeup the green thread to dispatch it c = ' '.encode() self._event_notify_send.write(c) self._event_notify_send.flush() def _dispatch_events(self): """Wait for & dispatch events from native thread Blocks until native thread indicates some events are ready. Then dispatches all queued events. 
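Lifecycle events are emitted to the compute manager one by one as they are dequeued; connection close events are coalesced so that only the most recent close event is acted upon.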
""" # Wait to be notified that there are some # events pending try: _c = self._event_notify_recv.read(1) assert _c except ValueError: return # will be raised when pipe is closed # Process as many events as possible without # blocking last_close_event = None while not self._event_queue.empty(): try: event = self._event_queue.get(block=False) if isinstance(event, virtevent.LifecycleEvent): self.emit_event(event) elif 'conn' in event and 'reason' in event: last_close_event = event except native_Queue.Empty: pass if last_close_event is None: return conn = last_close_event['conn'] # get_new_connection may already have disabled the host, # in which case _wrapped_conn is None. with self._wrapped_conn_lock: if conn == self._wrapped_conn: reason = last_close_event['reason'] _error = _("Connection to libvirt lost: %s") % reason LOG.warn(_error) self._wrapped_conn = None # Disable compute service to avoid # new instances of being scheduled on this host. self._set_host_enabled(False, disable_reason=_error) def _init_events_pipe(self): """Create a self-pipe for the native thread to synchronize on. This code is taken from the eventlet tpool module, under terms of the Apache License v2.0. """ self._event_queue = native_Queue.Queue() try: rpipe, wpipe = os.pipe() self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0) self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0) except (ImportError, NotImplementedError): # This is Windows compatibility -- use a socket instead # of a pipe because pipes don't really exist on Windows. sock = eventlet_util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM) sock.bind(('localhost', 0)) sock.listen(50) csock = eventlet_util.__original_socket__(socket.AF_INET, socket.SOCK_STREAM) csock.connect(('localhost', sock.getsockname()[1])) nsock, addr = sock.accept() self._event_notify_send = nsock.makefile('wb', 0) gsock = greenio.GreenSocket(csock) self._event_notify_recv = gsock.makefile('rb', 0) def _init_events(self): """Initializes the libvirt events subsystem. This requires running a native thread to provide the libvirt event loop integration. This forwards events to a green thread which does the actual dispatching. """ self._init_events_pipe() LOG.debug(_("Starting native event thread")) event_thread = native_threading.Thread(target=self._native_thread) event_thread.setDaemon(True) event_thread.start() LOG.debug(_("Starting green dispatch thread")) eventlet.spawn(self._dispatch_thread) def _do_quality_warnings(self): """Warn about untested driver configurations. This will log a warning message about untested driver or host arch configurations to indicate to administrators that the quality is unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems is tested upstream. """ caps = self.get_host_capabilities() arch = caps.host.cpu.arch if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or arch not in ('i686', 'x86_64')): LOG.warning(_('The libvirt driver is not tested on ' '%(type)s/%(arch)s by the OpenStack project and ' 'thus its quality can not be ensured. For more ' 'information, see: https://wiki.openstack.org/wiki/' 'HypervisorSupportMatrix'), {'type': CONF.libvirt.virt_type, 'arch': arch}) def init_host(self, host): # NOTE(dkliban): Error handler needs to be registered before libvirt # connection is used for the first time. Otherwise, the # handler does not get registered. 
libvirt.registerErrorHandler(libvirt_error_handler, None) libvirt.virEventRegisterDefaultImpl() self._do_quality_warnings() if not self.has_min_version(MIN_LIBVIRT_VERSION): major = MIN_LIBVIRT_VERSION[0] minor = MIN_LIBVIRT_VERSION[1] micro = MIN_LIBVIRT_VERSION[2] LOG.error(_('Nova requires libvirt version ' '%(major)i.%(minor)i.%(micro)i or greater.'), {'major': major, 'minor': minor, 'micro': micro}) self._init_events() def _get_new_connection(self): # call with _wrapped_conn_lock held LOG.debug(_('Connecting to libvirt: %s'), self.uri()) wrapped_conn = None try: wrapped_conn = self._connect(self.uri(), self.read_only) finally: # Enabling the compute service, in case it was disabled # since the connection was successful. disable_reason = DISABLE_REASON_UNDEFINED if not wrapped_conn: disable_reason = 'Failed to connect to libvirt' self._set_host_enabled(bool(wrapped_conn), disable_reason) self._wrapped_conn = wrapped_conn try: LOG.debug(_("Registering for lifecycle events %s"), self) wrapped_conn.domainEventRegisterAny( None, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self._event_lifecycle_callback, self) except Exception as e: LOG.warn(_("URI %(uri)s does not support events: %(error)s"), {'uri': self.uri(), 'error': e}) try: LOG.debug(_("Registering for connection events: %s") % str(self)) wrapped_conn.registerCloseCallback(self._close_callback, None) except (TypeError, AttributeError) as e: # NOTE: The registerCloseCallback of python-libvirt 1.0.1+ # is defined with 3 arguments, and the above registerClose- # Callback succeeds. However, the one of python-libvirt 1.0.0 # is defined with 4 arguments and TypeError happens here. # Then python-libvirt 0.9 does not define a method register- # CloseCallback. LOG.debug(_("The version of python-libvirt does not support " "registerCloseCallback or is too old: %s"), e) except libvirt.libvirtError as e: LOG.warn(_("URI %(uri)s does not support connection" " events: %(error)s"), {'uri': self.uri(), 'error': e}) return wrapped_conn def _get_connection(self): # multiple concurrent connections are protected by _wrapped_conn_lock with self._wrapped_conn_lock: wrapped_conn = self._wrapped_conn if not wrapped_conn or not self._test_connection(wrapped_conn): wrapped_conn = self._get_new_connection() return wrapped_conn _conn = property(_get_connection) def _close_callback(self, conn, reason, opaque): close_info = {'conn': conn, 'reason': reason} self._queue_event(close_info) @staticmethod def _test_connection(conn): try: conn.getLibVersion() return True except libvirt.libvirtError as e: if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_ERR_INTERNAL_ERROR) and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)): LOG.debug(_('Connection to libvirt broke')) return False raise @staticmethod def uri(): if CONF.libvirt.virt_type == 'uml': uri = CONF.libvirt.connection_uri or 'uml:///system' elif CONF.libvirt.virt_type == 'xen': uri = CONF.libvirt.connection_uri or 'xen:///' elif CONF.libvirt.virt_type == 'lxc': uri = CONF.libvirt.connection_uri or 'lxc:///' else: uri = CONF.libvirt.connection_uri or 'qemu:///system' return uri @staticmethod def _connect(uri, read_only): def _connect_auth_cb(creds, opaque): if len(creds) == 0: return 0 LOG.warning( _("Can not handle authentication request for %d credentials") % len(creds)) raise exception.NovaException( _("Can not handle authentication request for %d credentials") % len(creds)) auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_ECHOPROMPT, libvirt.VIR_CRED_REALM, 
libvirt.VIR_CRED_PASSPHRASE, libvirt.VIR_CRED_NOECHOPROMPT, libvirt.VIR_CRED_EXTERNAL], _connect_auth_cb, None] try: flags = 0 if read_only: flags = libvirt.VIR_CONNECT_RO # tpool.proxy_call creates a native thread. Due to limitations # with eventlet locking we cannot use the logging API inside # the called function. return tpool.proxy_call( (libvirt.virDomain, libvirt.virConnect), libvirt.openAuth, uri, auth, flags) except libvirt.libvirtError as ex: LOG.exception(_("Connection to libvirt failed: %s"), ex) payload = dict(ip=LibvirtDriver.get_host_ip_addr(), method='_connect', reason=ex) rpc.get_notifier('compute').error(nova_context.get_admin_context(), 'compute.libvirt.error', payload) raise exception.HypervisorUnavailable(host=CONF.host) def get_num_instances(self): """Efficient override of base instance_exists method.""" return self._conn.numOfDomains() def instance_exists(self, instance_name): """Efficient override of base instance_exists method.""" try: self._lookup_by_name(instance_name) return True except exception.NovaException: return False # TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed. def list_instance_ids(self): if self._conn.numOfDomains() == 0: return [] return self._conn.listDomainsID() def list_instances(self): names = [] for domain_id in self.list_instance_ids(): try: # We skip domains with ID 0 (hypervisors). if domain_id != 0: domain = self._lookup_by_id(domain_id) names.append(domain.name()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue # extend instance list to contain also defined domains names.extend([vm for vm in self._conn.listDefinedDomains() if vm not in names]) return names def list_instance_uuids(self): uuids = set() for domain_id in self.list_instance_ids(): try: # We skip domains with ID 0 (hypervisors). 
if domain_id != 0: domain = self._lookup_by_id(domain_id) uuids.add(domain.UUIDString()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue # extend instance list to contain also defined domains for domain_name in self._conn.listDefinedDomains(): try: uuids.add(self._lookup_by_name(domain_name).UUIDString()) except exception.InstanceNotFound: # Ignore deleted instance while listing continue return list(uuids) def plug_vifs(self, instance, network_info): """Plug VIFs into networks.""" for vif in network_info: self.vif_driver.plug(instance, vif) def unplug_vifs(self, instance, network_info, ignore_errors=False): """Unplug VIFs from networks.""" for vif in network_info: try: self.vif_driver.unplug(instance, vif) except exception.NovaException: if not ignore_errors: raise def _teardown_container(self, instance): inst_path = libvirt_utils.get_instance_path(instance) container_dir = os.path.join(inst_path, 'rootfs') container_root_device = instance.get('root_device_name') disk.teardown_container(container_dir, container_root_device) def _destroy(self, instance): try: virt_dom = self._lookup_by_name(instance['name']) except exception.InstanceNotFound: virt_dom = None # If the instance is already terminated, we're still happy # Otherwise, destroy it old_domid = -1 if virt_dom is not None: try: old_domid = virt_dom.ID() virt_dom.destroy() # NOTE(GuanQiang): teardown container to avoid resource leak if CONF.libvirt.virt_type == 'lxc': self._teardown_container(instance) except libvirt.libvirtError as e: is_okay = False errcode = e.get_error_code() if errcode == libvirt.VIR_ERR_OPERATION_INVALID: # If the instance is already shut off, we get this: # Code=55 Error=Requested operation is not valid: # domain is not running (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() state = LIBVIRT_POWER_STATE[state] if state == power_state.SHUTDOWN: is_okay = True elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT: LOG.warn(_("Cannot destroy instance, operation time out"), instance=instance) reason = _("operation time out") raise exception.InstancePowerOffFailure(reason=reason) if not is_okay: with excutils.save_and_reraise_exception(): LOG.error(_('Error from libvirt during destroy. ' 'Code=%(errcode)s Error=%(e)s'), {'errcode': errcode, 'e': e}, instance=instance) def _wait_for_destroy(expected_domid): """Called at an interval until the VM is gone.""" # NOTE(vish): If the instance disappears during the destroy # we ignore it so the cleanup can still be # attempted because we would prefer destroy to # never fail. try: dom_info = self.get_info(instance) state = dom_info['state'] new_domid = dom_info['id'] except exception.InstanceNotFound: LOG.error(_("During wait destroy, instance disappeared."), instance=instance) raise loopingcall.LoopingCallDone() if state == power_state.SHUTDOWN: LOG.info(_("Instance destroyed successfully."), instance=instance) raise loopingcall.LoopingCallDone() # NOTE(wangpan): If the instance was booted again after destroy, # this may be a endless loop, so check the id of # domain here, if it changed and the instance is # still running, we should destroy it again. 
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details if new_domid != expected_domid: LOG.info(_("Instance may be started again."), instance=instance) kwargs['is_running'] = True raise loopingcall.LoopingCallDone() kwargs = {'is_running': False} timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy, old_domid) timer.start(interval=0.5).wait() if kwargs['is_running']: LOG.info(_("Going to destroy instance again."), instance=instance) self._destroy(instance) def destroy(self, context, instance, network_info, block_device_info=None, destroy_disks=True): self._destroy(instance) self.cleanup(context, instance, network_info, block_device_info, destroy_disks) def _undefine_domain(self, instance): try: virt_dom = self._lookup_by_name(instance['name']) except exception.InstanceNotFound: virt_dom = None if virt_dom: try: try: virt_dom.undefineFlags( libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE) except libvirt.libvirtError: LOG.debug(_("Error from libvirt during undefineFlags." " Retrying with undefine"), instance=instance) virt_dom.undefine() except AttributeError: # NOTE(vish): Older versions of libvirt don't support # undefine flags, so attempt to do the # right thing. try: if virt_dom.hasManagedSaveImage(0): virt_dom.managedSaveRemove(0) except AttributeError: pass virt_dom.undefine() except libvirt.libvirtError as e: with excutils.save_and_reraise_exception(): errcode = e.get_error_code() LOG.error(_('Error from libvirt during undefine. ' 'Code=%(errcode)s Error=%(e)s') % {'errcode': errcode, 'e': e}, instance=instance) def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True): self._undefine_domain(instance) self.unplug_vifs(instance, network_info, ignore_errors=True) retry = True while retry: try: self.firewall_driver.unfilter_instance(instance, network_info=network_info) except libvirt.libvirtError as e: try: state = self.get_info(instance)['state'] except exception.InstanceNotFound: state = power_state.SHUTDOWN if state != power_state.SHUTDOWN: LOG.warn(_("Instance may be still running, destroy " "it again."), instance=instance) self._destroy(instance) else: retry = False errcode = e.get_error_code() LOG.exception(_('Error from libvirt during unfilter. ' 'Code=%(errcode)s Error=%(e)s') % {'errcode': errcode, 'e': e}, instance=instance) reason = "Error unfiltering instance." raise exception.InstanceTerminationFailure(reason=reason) except Exception: retry = False raise else: retry = False # FIXME(wangpan): if the instance is booted again here, such as the # the soft reboot operation boot it here, it will # become "running deleted", should we check and destroy # it at the end of this method? # NOTE(vish): we disconnect from volumes regardless block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_dev = vol['mount_device'].rpartition("/")[2] if ('data' in connection_info and 'volume_id' in connection_info['data']): volume_id = connection_info['data']['volume_id'] encryption = encryptors.get_encryption_metadata( context, self._volume_api, volume_id, connection_info) if encryption: # The volume must be detached from the VM before # disconnecting it from its encryptor. Otherwise, the # encryptor may report that the volume is still in use. 
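# The 'encryption' metadata fetched above is a dict; an illustrative # (not exhaustive) shape: # {'provider': 'nova.volume.encryptors.luks.LuksEncryptor', # 'control_location': 'front-end', # 'cipher': 'aes-xts-plain64', 'key_size': 512}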
encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor.detach_volume(**encryption) try: self.volume_driver_method('disconnect_volume', connection_info, disk_dev) except Exception as exc: with excutils.save_and_reraise_exception() as ctxt: if destroy_disks: # Don't block on Volume errors if we're trying to # delete the instance as we may be patially created # or deleted ctxt.reraise = False LOG.warn(_("Ignoring Volume Error on vol %(vol_id)s " "during delete %(exc)s"), {'vol_id': vol.get('volume_id'), 'exc': exc}, instance=instance) if destroy_disks: self._delete_instance_files(instance) self._cleanup_lvm(instance) #NOTE(haomai): destroy volumes if needed if CONF.libvirt.images_type == 'rbd': self._cleanup_rbd(instance) def _cleanup_rbd(self, instance): pool = CONF.libvirt.images_rbd_pool volumes = libvirt_utils.list_rbd_volumes(pool) pattern = instance['uuid'] def belongs_to_instance(disk): return disk.startswith(pattern) volumes = filter(belongs_to_instance, volumes) if volumes: libvirt_utils.remove_rbd_volumes(pool, *volumes) def _cleanup_lvm(self, instance): """Delete all LVM disks for given instance object.""" disks = self._lvm_disks(instance) if disks: libvirt_utils.remove_logical_volumes(*disks) def _lvm_disks(self, instance): """Returns all LVM disks for given instance object.""" if CONF.libvirt.images_volume_group: vg = os.path.join('/dev', CONF.libvirt.images_volume_group) if not os.path.exists(vg): return [] pattern = '%s_' % instance['uuid'] # TODO(sdague): remove in Juno def belongs_to_instance_legacy(disk): # We don't want to leak old disks, but at the same time, we # don't want to do an unsafe thing. So we will only handle # the old filter if it's the system default still. pattern = '%s_' % instance['name'] if disk.startswith(pattern): if CONF.instance_name_template == 'instance-%08x': return True else: LOG.warning(_('Volume %(disk)s possibly unsafe to ' 'remove, please clean up manually'), {'disk': disk}) return False def belongs_to_instance(disk): return disk.startswith(pattern) def fullpath(name): return os.path.join(vg, name) logical_volumes = libvirt_utils.list_logical_volumes(vg) disk_names = filter(belongs_to_instance, logical_volumes) # TODO(sdague): remove in Juno disk_names.extend( filter(belongs_to_instance_legacy, logical_volumes) ) disks = map(fullpath, disk_names) return disks return [] def get_volume_connector(self, instance): if not self._initiator: self._initiator = libvirt_utils.get_iscsi_initiator() if not self._initiator: LOG.debug(_('Could not determine iscsi initiator name'), instance=instance) if not self._fc_wwnns: self._fc_wwnns = libvirt_utils.get_fc_wwnns() if not self._fc_wwnns or len(self._fc_wwnns) == 0: LOG.debug(_('Could not determine fibre channel ' 'world wide node names'), instance=instance) if not self._fc_wwpns: self._fc_wwpns = libvirt_utils.get_fc_wwpns() if not self._fc_wwpns or len(self._fc_wwpns) == 0: LOG.debug(_('Could not determine fibre channel ' 'world wide port names'), instance=instance) connector = {'ip': CONF.my_ip, 'host': CONF.host} if self._initiator: connector['initiator'] = self._initiator if self._fc_wwnns and self._fc_wwpns: connector["wwnns"] = self._fc_wwnns connector["wwpns"] = self._fc_wwpns return connector def _cleanup_resize(self, instance, network_info): # NOTE(wangpan): we get the pre-grizzly instance path firstly, # so the backup dir of pre-grizzly instance can # be deleted correctly with grizzly or later nova. 
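# For example, assuming the default instances_path, the legacy backup # dir looks like /var/lib/nova/instances/instance-00000001_resize # while the current layout uses /var/lib/nova/instances/<uuid>_resize.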
pre_grizzly_name = libvirt_utils.get_instance_path(instance, forceold=True) target = pre_grizzly_name + '_resize' if not os.path.exists(target): target = libvirt_utils.get_instance_path(instance) + '_resize' if os.path.exists(target): # Deletion can fail over NFS, so retry the deletion as required. # Set maximum attempt as 5, most test can remove the directory # for the second time. utils.execute('rm', '-rf', target, delay_on_retry=True, attempts=5) if instance['host'] != CONF.host: self._undefine_domain(instance) self.unplug_vifs(instance, network_info) self.firewall_driver.unfilter_instance(instance, network_info) def volume_driver_method(self, method_name, connection_info, *args, **kwargs): driver_type = connection_info.get('driver_volume_type') if driver_type not in self.volume_drivers: raise exception.VolumeDriverNotFound(driver_type=driver_type) driver = self.volume_drivers[driver_type] method = getattr(driver, method_name) return method(connection_info, *args, **kwargs) def _get_volume_encryptor(self, connection_info, encryption): encryptor = encryptors.get_volume_encryptor(connection_info, **encryption) return encryptor def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): instance_name = instance['name'] virt_dom = self._lookup_by_name(instance_name) disk_dev = mountpoint.rpartition("/")[2] bdm = { 'device_name': disk_dev, 'disk_bus': disk_bus, 'device_type': device_type} # Note(cfb): If the volume has a custom block size, check that # that we are using QEMU/KVM and libvirt >= 0.10.2. The # presence of a block size is considered mandatory by # cinder so we fail if we can't honor the request. data = {} if ('data' in connection_info): data = connection_info['data'] if ('logical_block_size' in data or 'physical_block_size' in data): if ((CONF.libvirt.virt_type != "kvm" and CONF.libvirt.virt_type != "qemu")): msg = _("Volume sets block size, but the current " "libvirt hypervisor '%s' does not support custom " "block size") % CONF.libvirt.virt_type raise exception.InvalidHypervisorType(msg) if not self.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION): ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION]) msg = _("Volume sets block size, but libvirt '%s' or later is " "required.") % ver raise exception.Invalid(msg) disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type, bdm) conf = self.volume_driver_method('connect_volume', connection_info, disk_info) self.set_cache_mode(conf) try: # NOTE(vish): We can always affect config because our # domains are persistent, but we should only # affect live if the domain is running. 
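# e.g. a RUNNING or PAUSED domain gets CONFIG|LIVE below, so the new # disk both appears immediately and persists across restarts.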
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG state = LIBVIRT_POWER_STATE[virt_dom.info()[0]] if state in (power_state.RUNNING, power_state.PAUSED): flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE # cache device_path in connection_info -- required by encryptors if 'data' in connection_info: connection_info['data']['device_path'] = conf.source_path if encryption: encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor.attach_volume(context, **encryption) virt_dom.attachDeviceFlags(conf.to_xml(), flags) except Exception as ex: if isinstance(ex, libvirt.libvirtError): errcode = ex.get_error_code() if errcode == libvirt.VIR_ERR_OPERATION_FAILED: self.volume_driver_method('disconnect_volume', connection_info, disk_dev) raise exception.DeviceIsBusy(device=disk_dev) with excutils.save_and_reraise_exception(): self.volume_driver_method('disconnect_volume', connection_info, disk_dev) def _swap_volume(self, domain, disk_path, new_path): """Swap existing disk with a new block device.""" # Save a copy of the domain's running XML file xml = domain.XMLDesc(0) # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. try: domain.blockJobAbort(disk_path, 0) except Exception: pass try: # NOTE (rmk): blockRebase cannot be executed on persistent # domains, so we need to temporarily undefine it. # If any part of this block fails, the domain is # re-defined regardless. if domain.isPersistent(): domain.undefine() # Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to # allow writing to existing external volume file domain.blockRebase(disk_path, new_path, 0, libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT) while self._wait_for_block_job(domain, disk_path): time.sleep(0.5) domain.blockJobAbort(disk_path, libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) finally: self._conn.defineXML(xml) def swap_volume(self, old_connection_info, new_connection_info, instance, mountpoint): instance_name = instance['name'] virt_dom = self._lookup_by_name(instance_name) disk_dev = mountpoint.rpartition("/")[2] xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev) if not xml: raise exception.DiskNotFound(location=disk_dev) disk_info = { 'dev': disk_dev, 'bus': blockinfo.get_disk_bus_for_disk_dev( CONF.libvirt.virt_type, disk_dev), 'type': 'disk', } conf = self.volume_driver_method('connect_volume', new_connection_info, disk_info) if not conf.source_path: self.volume_driver_method('disconnect_volume', new_connection_info, disk_dev) raise NotImplementedError(_("Swap only supports host devices")) self._swap_volume(virt_dom, disk_dev, conf.source_path) self.volume_driver_method('disconnect_volume', old_connection_info, disk_dev) @staticmethod def _get_disk_xml(xml, device): """Returns the xml for the disk mounted at device.""" try: doc = etree.fromstring(xml) except Exception: return None ret = doc.findall('./devices/disk') for node in ret: for child in node.getchildren(): if child.tag == 'target': if child.get('dev') == device: return etree.tostring(node) def _get_existing_domain_xml(self, instance, network_info, block_device_info=None): try: virt_dom = self._lookup_by_name(instance['name']) xml = virt_dom.XMLDesc(0) except exception.InstanceNotFound: disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info) xml = self.to_xml(nova_context.get_admin_context(), instance, network_info, disk_info, block_device_info=block_device_info) return xml def detach_volume(self, connection_info, instance, mountpoint, encryption=None): 
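"""Detach the volume mounted at mountpoint from the instance. Finds the disk XML on the domain and detaches it (from the live domain as well when it is running), detaches any volume encryptor, and finally disconnects the volume driver. A domain that has already disappeared is tolerated, since this path is also used to clean up after live migration. """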
instance_name = instance['name'] disk_dev = mountpoint.rpartition("/")[2] try: virt_dom = self._lookup_by_name(instance_name) xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev) if not xml: raise exception.DiskNotFound(location=disk_dev) else: # NOTE(vish): We can always affect config because our # domains are persistent, but we should only # affect live if the domain is running. flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG state = LIBVIRT_POWER_STATE[virt_dom.info()[0]] if state in (power_state.RUNNING, power_state.PAUSED): flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE virt_dom.detachDeviceFlags(xml, flags) if encryption: # The volume must be detached from the VM before # disconnecting it from its encryptor. Otherwise, the # encryptor may report that the volume is still in use. encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor.detach_volume(**encryption) except exception.InstanceNotFound: # NOTE(zhaoqin): If the instance does not exist, _lookup_by_name() # will throw InstanceNotFound exception. Need to # disconnect volume under this circumstance. LOG.warn(_("During detach_volume, instance disappeared.")) except libvirt.libvirtError as ex: # NOTE(vish): This is called to cleanup volumes after live # migration, so we should still disconnect even if # the instance doesn't exist here anymore. error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: # NOTE(vish): LOG.warn(_("During detach_volume, instance disappeared.")) else: raise self.volume_driver_method('disconnect_volume', connection_info, disk_dev) def attach_interface(self, instance, image_meta, vif): virt_dom = self._lookup_by_name(instance['name']) flavor = flavor_obj.Flavor.get_by_id( nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id']) self.vif_driver.plug(instance, vif) self.firewall_driver.setup_basic_filtering(instance, [vif]) cfg = self.vif_driver.get_config(instance, vif, image_meta, flavor) try: flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG state = LIBVIRT_POWER_STATE[virt_dom.info()[0]] if state == power_state.RUNNING or state == power_state.PAUSED: flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE virt_dom.attachDeviceFlags(cfg.to_xml(), flags) except libvirt.libvirtError: LOG.error(_('attaching network adapter failed.'), instance=instance) self.vif_driver.unplug(instance, vif) raise exception.InterfaceAttachFailed(instance) def detach_interface(self, instance, vif): virt_dom = self._lookup_by_name(instance['name']) flavor = flavor_obj.Flavor.get_by_id( nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id']) cfg = self.vif_driver.get_config(instance, vif, None, flavor) try: self.vif_driver.unplug(instance, vif) flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG state = LIBVIRT_POWER_STATE[virt_dom.info()[0]] if state == power_state.RUNNING or state == power_state.PAUSED: flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE virt_dom.detachDeviceFlags(cfg.to_xml(), flags) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: LOG.warn(_("During detach_interface, " "instance disappeared."), instance=instance) else: LOG.error(_('detaching network adapter failed.'), instance=instance) raise exception.InterfaceDetachFailed(instance) def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name): metadata = {'is_public': False, 'status': 'active', 'name': snp_name, 'properties': { 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', 'image_state': 'available', 'owner_id': instance['project_id'], 
'ramdisk_id': instance['ramdisk_id'], } } if instance['os_type']: metadata['properties']['os_type'] = instance['os_type'] # NOTE(vish): glance forces ami disk format to be ami if base.get('disk_format') == 'ami': metadata['disk_format'] = 'ami' else: metadata['disk_format'] = img_fmt metadata['container_format'] = base.get('container_format', 'bare') return metadata def snapshot(self, context, instance, image_href, update_task_state): """Create snapshot from a running VM instance. This command only works with qemu 0.14+ """ try: virt_dom = self._lookup_by_name(instance['name']) except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance['uuid']) (image_service, image_id) = glance.get_remote_image_service( context, instance['image_ref']) base = compute_utils.get_image_metadata( context, image_service, image_id, instance) _image_service = glance.get_remote_image_service(context, image_href) snapshot_image_service, snapshot_image_id = _image_service snapshot = snapshot_image_service.show(context, snapshot_image_id) disk_path = libvirt_utils.find_disk(virt_dom) source_format = libvirt_utils.get_disk_type(disk_path) image_format = CONF.libvirt.snapshot_image_format or source_format # NOTE(bfilippov): save lvm and rbd as raw if image_format == 'lvm' or image_format == 'rbd': image_format = 'raw' metadata = self._create_snapshot_metadata(base, instance, image_format, snapshot['name']) snapshot_name = uuid.uuid4().hex (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() state = LIBVIRT_POWER_STATE[state] # NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0. # These restrictions can be relaxed as other configurations # can be validated. if self.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION, MIN_QEMU_LIVESNAPSHOT_VERSION, REQ_HYPERVISOR_LIVESNAPSHOT) \ and not source_format == "lvm" and not source_format == 'rbd': live_snapshot = True # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. This operation also # confirms the running instance, as opposed to the system as a # whole, has a new enough version of the hypervisor (bug 1193146). try: virt_dom.blockJobAbort(disk_path, 0) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED: live_snapshot = False else: pass else: live_snapshot = False # NOTE(rmk): We cannot perform live snapshots when a managedSave # file is present, so we will use the cold/legacy method # for instances which are shutdown. 
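        # Summary of the decision above (descriptive comment only, not
        # upstream code): live_snapshot ends up True only when all of the
        # following held:
        #   - libvirt/QEMU meet MIN_*_LIVESNAPSHOT_VERSION and the
        #     hypervisor matches REQ_HYPERVISOR_LIVESNAPSHOT
        #   - the source disk format is neither 'lvm' nor 'rbd'
        #   - blockJobAbort() did not fail with VIR_ERR_CONFIG_UNSUPPORTED
        # A SHUTDOWN power state (checked next) forces the cold path too.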
if state == power_state.SHUTDOWN: live_snapshot = False # NOTE(dkang): managedSave does not work for LXC if CONF.libvirt.virt_type != 'lxc' and not live_snapshot: if state == power_state.RUNNING or state == power_state.PAUSED: self._detach_pci_devices(virt_dom, pci_manager.get_instance_pci_devs(instance)) virt_dom.managedSave(0) snapshot_backend = self.image_backend.snapshot(disk_path, image_type=source_format) if live_snapshot: LOG.info(_("Beginning live snapshot process"), instance=instance) else: LOG.info(_("Beginning cold snapshot process"), instance=instance) update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD) snapshot_directory = CONF.libvirt.snapshots_directory fileutils.ensure_tree(snapshot_directory) with utils.tempdir(dir=snapshot_directory) as tmpdir: try: out_path = os.path.join(tmpdir, snapshot_name) if live_snapshot: # NOTE(xqueralt): libvirt needs o+x in the temp directory os.chmod(tmpdir, 0o701) self._live_snapshot(virt_dom, disk_path, out_path, image_format) else: snapshot_backend.snapshot_extract(out_path, image_format) finally: new_dom = None # NOTE(dkang): because previous managedSave is not called # for LXC, _create_domain must not be called. if CONF.libvirt.virt_type != 'lxc' and not live_snapshot: if state == power_state.RUNNING: new_dom = self._create_domain(domain=virt_dom) elif state == power_state.PAUSED: new_dom = self._create_domain(domain=virt_dom, launch_flags=libvirt.VIR_DOMAIN_START_PAUSED) if new_dom is not None: self._attach_pci_devices(new_dom, pci_manager.get_instance_pci_devs(instance)) LOG.info(_("Snapshot extracted, beginning image upload"), instance=instance) # Upload that image to the image service update_task_state(task_state=task_states.IMAGE_UPLOADING, expected_state=task_states.IMAGE_PENDING_UPLOAD) with libvirt_utils.file_open(out_path) as image_file: image_service.update(context, image_href, metadata, image_file) LOG.info(_("Snapshot image upload complete"), instance=instance) @staticmethod def _wait_for_block_job(domain, disk_path, abort_on_error=False): """Wait for libvirt block job to complete. Libvirt may return either cur==end or an empty dict when the job is complete, depending on whether the job has been cleaned up by libvirt yet, or not. :returns: True if still in progress False if completed """ status = domain.blockJobInfo(disk_path, 0) if status == -1 and abort_on_error: msg = _('libvirt error while requesting blockjob info.') raise exception.NovaException(msg) try: cur = status.get('cur', 0) end = status.get('end', 0) except Exception: return False if cur == end: return False else: return True def _live_snapshot(self, domain, disk_path, out_path, image_format): """Snapshot an instance without downtime.""" # Save a copy of the domain's running XML file xml = domain.XMLDesc(0) # Abort is an idempotent operation, so make sure any block # jobs which may have failed are ended. try: domain.blockJobAbort(disk_path, 0) except Exception: pass # NOTE (rmk): We are using shallow rebases as a workaround to a bug # in QEMU 1.3. In order to do this, we need to create # a destination image with the original backing file # and matching size of the instance root disk. src_disk_size = libvirt_utils.get_disk_size(disk_path) src_back_path = libvirt_utils.get_disk_backing_file(disk_path, basename=False) disk_delta = out_path + '.delta' libvirt_utils.create_cow_image(src_back_path, disk_delta, src_disk_size) try: # NOTE (rmk): blockRebase cannot be executed on persistent # domains, so we need to temporarily undefine it. 
# If any part of this block fails, the domain is # re-defined regardless. if domain.isPersistent(): domain.undefine() # NOTE (rmk): Establish a temporary mirror of our root disk and # issue an abort once we have a complete copy. domain.blockRebase(disk_path, disk_delta, 0, libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT | libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW) while self._wait_for_block_job(domain, disk_path): time.sleep(0.5) domain.blockJobAbort(disk_path, 0) libvirt_utils.chown(disk_delta, os.getuid()) finally: self._conn.defineXML(xml) # Convert the delta (CoW) image with a backing file to a flat # image with no backing file. libvirt_utils.extract_snapshot(disk_delta, 'qcow2', out_path, image_format) def _volume_snapshot_update_status(self, context, snapshot_id, status): """Send a snapshot status update to Cinder. This method captures and logs exceptions that occur since callers cannot do anything useful with these exceptions. Operations on the Cinder side waiting for this will time out if a failure occurs sending the update. :param context: security context :param snapshot_id: id of snapshot being updated :param status: new status value """ try: self._volume_api.update_snapshot_status(context, snapshot_id, status) except Exception: msg = _('Failed to send updated snapshot status ' 'to volume service.') LOG.exception(msg) def _volume_snapshot_create(self, context, instance, domain, volume_id, snapshot_id, new_file): """Perform volume snapshot. :param domain: VM that volume is attached to :param volume_id: volume UUID to snapshot :param snapshot_id: UUID of snapshot being created :param new_file: relative path to new qcow2 file present on share """ xml = domain.XMLDesc(0) xml_doc = etree.fromstring(xml) device_info = vconfig.LibvirtConfigGuest() device_info.parse_dom(xml_doc) disks_to_snap = [] # to be snapshotted by libvirt disks_to_skip = [] # local disks not snapshotted for disk in device_info.devices: if (disk.root_name != 'disk'): continue if (disk.target_dev is None): continue if (disk.serial is None or disk.serial != volume_id): disks_to_skip.append(disk.source_path) continue # disk is a Cinder volume with the correct volume_id disk_info = { 'dev': disk.target_dev, 'serial': disk.serial, 'current_file': disk.source_path } # Determine path for new_file based on current path current_file = disk_info['current_file'] new_file_path = os.path.join(os.path.dirname(current_file), new_file) disks_to_snap.append((current_file, new_file_path)) if not disks_to_snap: msg = _('Found no disk to snapshot.') raise exception.NovaException(msg) snapshot = vconfig.LibvirtConfigGuestSnapshot() for current_name, new_filename in disks_to_snap: snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk() snap_disk.name = current_name snap_disk.source_path = new_filename snap_disk.source_type = 'file' snap_disk.snapshot = 'external' snap_disk.driver_name = 'qcow2' snapshot.add_disk(snap_disk) for dev in disks_to_skip: snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk() snap_disk.name = dev snap_disk.snapshot = 'no' snapshot.add_disk(snap_disk) snapshot_xml = snapshot.to_xml() LOG.debug(_("snap xml: %s") % snapshot_xml) snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE try: domain.snapshotCreateXML(snapshot_xml, snap_flags | QUIESCE) return except libvirt.libvirtError: msg = _('Unable to create quiesced VM snapshot, ' 
                    'attempting again with quiescing disabled.')
            LOG.exception(msg)

        try:
            domain.snapshotCreateXML(snapshot_xml, snap_flags)
        except libvirt.libvirtError:
            msg = _('Unable to create VM snapshot, '
                    'failing volume_snapshot operation.')
            LOG.exception(msg)

            raise

    def _volume_refresh_connection_info(self, context, instance, volume_id):
        bdm = block_device_obj.BlockDeviceMapping.get_by_volume_id(context,
                                                                   volume_id)
        driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
        driver_bdm.refresh_connection_info(context, instance,
                                           self._volume_api, self)

    def volume_snapshot_create(self, context, instance, volume_id,
                               create_info):
        """Create snapshots of a Cinder volume via libvirt.

        :param instance: VM instance object reference
        :param volume_id: id of volume being snapshotted
        :param create_info: dict of information used to create snapshots
                     - snapshot_id : ID of snapshot
                     - type : qcow2 (currently the only supported type;
                       any other value raises NovaException)
                     - new_file : qcow2 file created by Cinder which
                     becomes the VM's active image after
                     the snapshot is complete
        """

        LOG.debug(_("volume_snapshot_create: create_info: %(c_info)s"),
                  {'c_info': create_info}, instance=instance)

        try:
            virt_dom = self._lookup_by_name(instance.name)
        except exception.InstanceNotFound:
            raise exception.InstanceNotRunning(instance_id=instance.uuid)

        if create_info['type'] != 'qcow2':
            raise exception.NovaException(_('Unknown type: %s') %
                                          create_info['type'])

        snapshot_id = create_info.get('snapshot_id', None)
        if snapshot_id is None:
            raise exception.NovaException(_('snapshot_id required '
                                            'in create_info'))

        try:
            self._volume_snapshot_create(context, instance, virt_dom,
                                         volume_id, snapshot_id,
                                         create_info['new_file'])
        except Exception:
            with excutils.save_and_reraise_exception():
                msg = _('Error occurred during volume_snapshot_create, '
                        'sending error status to Cinder.')
                LOG.exception(msg)
                self._volume_snapshot_update_status(
                    context, snapshot_id, 'error')

        self._volume_snapshot_update_status(
            context, snapshot_id, 'creating')

        def _wait_for_snapshot():
            snapshot = self._volume_api.get_snapshot(context, snapshot_id)

            if snapshot.get('status') != 'creating':
                self._volume_refresh_connection_info(context, instance,
                                                     volume_id)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
        timer.start(interval=0.5).wait()

    def _volume_snapshot_delete(self, context, instance, volume_id,
                                snapshot_id, delete_info=None):
        """Note: if file being merged into == active image:
            do a blockRebase (pull) operation
           else:
            do a blockCommit operation
           Files must be adjacent in snap chain.

        :param instance: instance object reference
        :param volume_id: volume UUID
        :param snapshot_id: snapshot UUID (unused currently)
        :param delete_info: {
            'type':              'qcow2',
            'file_to_merge':     'a.img',
            'merge_target_file': 'b.img' or None (if merging file_to_merge
                                                  into active image)
          }

        Libvirt blockjob handling required for this method is broken
        in versions of libvirt that do not contain:
        http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
        (Patch is pending in 1.0.5-maint branch as well, but we cannot detect
        libvirt 1.0.5.5 vs. 1.0.5.6 here.)
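
        Illustrative delete_info payloads (file names are hypothetical):

          {'type': 'qcow2', 'file_to_merge': 'snap.img',
           'merge_target_file': None}        -> blockRebase (pull) into the
                                                active image
          {'type': 'qcow2', 'file_to_merge': 'snap.img',
           'merge_target_file': 'base.img'}  -> blockCommit of snap.img
                                                into base.img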
""" if not self.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION): ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION]) msg = _("Libvirt '%s' or later is required for online deletion " "of volume snapshots.") % ver raise exception.Invalid(msg) LOG.debug(_('volume_snapshot_delete: delete_info: %s') % delete_info) if delete_info['type'] != 'qcow2': msg = _('Unknown delete_info type %s') % delete_info['type'] raise exception.NovaException(msg) try: virt_dom = self._lookup_by_name(instance.name) except exception.InstanceNotFound: raise exception.InstanceNotRunning(instance_id=instance.uuid) ##### Find dev name my_dev = None active_disk = None xml = virt_dom.XMLDesc(0) xml_doc = etree.fromstring(xml) device_info = vconfig.LibvirtConfigGuest() device_info.parse_dom(xml_doc) for disk in device_info.devices: if (disk.root_name != 'disk'): continue if (disk.target_dev is None or disk.serial is None): continue if disk.serial == volume_id: my_dev = disk.target_dev active_disk = disk.source_path if my_dev is None or active_disk is None: msg = _('Unable to locate disk matching id: %s') % volume_id raise exception.NovaException(msg) LOG.debug(_("found dev, it's %(dev)s, with active disk: %(disk)s"), {'dev': my_dev, 'disk': active_disk}) if delete_info['merge_target_file'] is None: # pull via blockRebase() # Merge the most recent snapshot into the active image rebase_disk = my_dev rebase_base = delete_info['file_to_merge'] rebase_bw = 0 rebase_flags = 0 LOG.debug(_('disk: %(disk)s, base: %(base)s, ' 'bw: %(bw)s, flags: %(flags)s') % {'disk': rebase_disk, 'base': rebase_base, 'bw': rebase_bw, 'flags': rebase_flags}) result = virt_dom.blockRebase(rebase_disk, rebase_base, rebase_bw, rebase_flags) if result == 0: LOG.debug(_('blockRebase started successfully')) while self._wait_for_block_job(virt_dom, rebase_disk, abort_on_error=True): LOG.debug(_('waiting for blockRebase job completion')) time.sleep(0.5) else: # commit with blockCommit() commit_disk = my_dev commit_base = delete_info['merge_target_file'] commit_top = delete_info['file_to_merge'] bandwidth = 0 flags = 0 result = virt_dom.blockCommit(commit_disk, commit_base, commit_top, bandwidth, flags) if result == 0: LOG.debug(_('blockCommit started successfully')) while self._wait_for_block_job(virt_dom, commit_disk, abort_on_error=True): LOG.debug(_('waiting for blockCommit job completion')) time.sleep(0.5) def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id, delete_info=None): try: self._volume_snapshot_delete(context, instance, volume_id, snapshot_id, delete_info=delete_info) except Exception: with excutils.save_and_reraise_exception(): msg = _('Error occurred during volume_snapshot_delete, ' 'sending error status to Cinder.') LOG.exception(msg) self._volume_snapshot_update_status( context, snapshot_id, 'error_deleting') self._volume_snapshot_update_status(context, snapshot_id, 'deleting') self._volume_refresh_connection_info(context, instance, volume_id) def reboot(self, context, instance, network_info, reboot_type='SOFT', block_device_info=None, bad_volumes_callback=None): """Reboot a virtual machine, given an instance reference.""" if reboot_type == 'SOFT': # NOTE(vish): This will attempt to do a graceful shutdown/restart. 
try: soft_reboot_success = self._soft_reboot(instance) except libvirt.libvirtError as e: LOG.debug(_("Instance soft reboot failed: %s"), e) soft_reboot_success = False if soft_reboot_success: LOG.info(_("Instance soft rebooted successfully."), instance=instance) return else: LOG.warn(_("Failed to soft reboot instance. " "Trying hard reboot."), instance=instance) return self._hard_reboot(context, instance, network_info, block_device_info) def _soft_reboot(self, instance): """Attempt to shutdown and restart the instance gracefully. We use shutdown and create here so we can return if the guest responded and actually rebooted. Note that this method only succeeds if the guest responds to acpi. Therefore we return success or failure so we can fall back to a hard reboot if necessary. :returns: True if the reboot succeeded """ dom = self._lookup_by_name(instance["name"]) (state, _max_mem, _mem, _cpus, _t) = dom.info() state = LIBVIRT_POWER_STATE[state] old_domid = dom.ID() # NOTE(vish): This check allows us to reboot an instance that # is already shutdown. if state == power_state.RUNNING: dom.shutdown() # NOTE(vish): This actually could take slightly longer than the # FLAG defines depending on how long the get_info # call takes to return. self._prepare_pci_devices_for_use( pci_manager.get_instance_pci_devs(instance)) for x in xrange(CONF.libvirt.wait_soft_reboot_seconds): dom = self._lookup_by_name(instance["name"]) (state, _max_mem, _mem, _cpus, _t) = dom.info() state = LIBVIRT_POWER_STATE[state] new_domid = dom.ID() # NOTE(ivoks): By checking domain IDs, we make sure we are # not recreating domain that's already running. if old_domid != new_domid: if state in [power_state.SHUTDOWN, power_state.CRASHED]: LOG.info(_("Instance shutdown successfully."), instance=instance) self._create_domain(domain=dom) timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_running, instance) timer.start(interval=0.5).wait() return True else: LOG.info(_("Instance may have been rebooted during soft " "reboot, so return now."), instance=instance) return True greenthread.sleep(1) return False def _hard_reboot(self, context, instance, network_info, block_device_info=None): """Reboot a virtual machine, given an instance reference. Performs a Libvirt reset (if supported) on the domain. If Libvirt reset is unavailable this method actually destroys and re-creates the domain to ensure the reboot happens, as the guest OS cannot ignore this action. If xml is set, it uses the passed in xml in place of the xml from the existing domain. """ self._destroy(instance) # Get the system metadata from the instance system_meta = utils.instance_sys_meta(instance) # Convert the system metadata to image metadata image_meta = utils.get_image_from_system_metadata(system_meta) if not image_meta: image_ref = instance.get('image_ref') service, image_id = glance.get_remote_image_service(context, image_ref) image_meta = compute_utils.get_image_metadata(context, service, image_id, instance) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info, image_meta) # NOTE(vish): This could generate the wrong device_format if we are # using the raw backend and the images don't exist yet. # The create_images_and_backing below doesn't properly # regenerate raw backend images, however, so when it # does we need to (re)generate the xml after the images # are in place. 
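        # Sequence sketch for the hard reboot below (descriptive comment
        # only): regenerate the domain XML, re-create any missing backing
        # files, re-establish networking and block devices via
        # _create_domain_and_network(), then poll get_info() until the
        # domain reports RUNNING.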
xml = self.to_xml(context, instance, network_info, disk_info, block_device_info=block_device_info, write_to_disk=True) # NOTE (rmk): Re-populate any missing backing files. disk_info_json = self.get_instance_disk_info(instance['name'], xml, block_device_info) instance_dir = libvirt_utils.get_instance_path(instance) self._create_images_and_backing(context, instance, instance_dir, disk_info_json) # Initialize all the necessary networking, block devices and # start the instance. self._create_domain_and_network(context, xml, instance, network_info, block_device_info, reboot=True, vifs_already_plugged=True) self._prepare_pci_devices_for_use( pci_manager.get_instance_pci_devs(instance)) def _wait_for_reboot(): """Called at an interval until the VM is running again.""" state = self.get_info(instance)['state'] if state == power_state.RUNNING: LOG.info(_("Instance rebooted successfully."), instance=instance) raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot) timer.start(interval=0.5).wait() def pause(self, instance): """Pause VM instance.""" dom = self._lookup_by_name(instance['name']) dom.suspend() def unpause(self, instance): """Unpause paused VM instance.""" dom = self._lookup_by_name(instance['name']) dom.resume() def power_off(self, instance): """Power off the specified instance.""" self._destroy(instance) def power_on(self, context, instance, network_info, block_device_info=None): """Power on the specified instance.""" # We use _hard_reboot here to ensure that all backing files, # network, and block device connections, etc. are established # and available before we attempt to start the instance. self._hard_reboot(context, instance, network_info, block_device_info) def suspend(self, instance): """Suspend the specified instance.""" dom = self._lookup_by_name(instance['name']) self._detach_pci_devices(dom, pci_manager.get_instance_pci_devs(instance)) dom.managedSave(0) def resume(self, context, instance, network_info, block_device_info=None): """resume the specified instance.""" xml = self._get_existing_domain_xml(instance, network_info, block_device_info) dom = self._create_domain_and_network(context, xml, instance, network_info, block_device_info=block_device_info, vifs_already_plugged=True) self._attach_pci_devices(dom, pci_manager.get_instance_pci_devs(instance)) def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """resume guest state when a host is booted.""" # Check if the instance is running already and avoid doing # anything if it is. if self.instance_exists(instance['name']): domain = self._lookup_by_name(instance['name']) state = LIBVIRT_POWER_STATE[domain.info()[0]] ignored_states = (power_state.RUNNING, power_state.SUSPENDED, power_state.NOSTATE, power_state.PAUSED) if state in ignored_states: return # Instance is not up and could be in an unknown state. # Be as absolute as possible about getting it back into # a known and running state. self._hard_reboot(context, instance, network_info, block_device_info) def rescue(self, context, instance, network_info, image_meta, rescue_password): """Loads a VM using rescue images. A rescue is normally performed when something goes wrong with the primary images and data needs to be corrected/recovered. Rescuing should not edit or over-ride the original image, only allow for data recovery. 
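        If no dedicated rescue image is configured, the instance's own
        image, kernel and ramdisk are reused for the rescue boot (see the
        rescue_images dict built below).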
""" instance_dir = libvirt_utils.get_instance_path(instance) unrescue_xml = self._get_existing_domain_xml(instance, network_info) unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml') libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml) rescue_images = { 'image_id': CONF.libvirt.rescue_image_id or instance['image_ref'], 'kernel_id': (CONF.libvirt.rescue_kernel_id or instance['kernel_id']), 'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or instance['ramdisk_id']), } disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, None, image_meta, rescue=True) self._create_image(context, instance, disk_info['mapping'], '.rescue', rescue_images, network_info=network_info, admin_pass=rescue_password) xml = self.to_xml(context, instance, network_info, disk_info, image_meta, rescue=rescue_images, write_to_disk=True) self._destroy(instance) self._create_domain(xml) def unrescue(self, instance, network_info): """Reboot the VM which is being rescued back into primary images. """ instance_dir = libvirt_utils.get_instance_path(instance) unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml') xml = libvirt_utils.load_file(unrescue_xml_path) virt_dom = self._lookup_by_name(instance['name']) self._destroy(instance) self._create_domain(xml, virt_dom) libvirt_utils.file_delete(unrescue_xml_path) rescue_files = os.path.join(instance_dir, "*.rescue") for rescue_file in glob.iglob(rescue_files): libvirt_utils.file_delete(rescue_file) def poll_rebooting_instances(self, timeout, instances): pass def _enable_hairpin(self, xml): interfaces = self.get_interfaces(xml) for interface in interfaces: utils.execute('tee', '/sys/class/net/%s/brport/hairpin_mode' % interface, process_input='1', run_as_root=True, check_exit_code=[0, 1]) # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info, image_meta) self._create_image(context, instance, disk_info['mapping'], network_info=network_info, block_device_info=block_device_info, files=injected_files, admin_pass=admin_password) xml = self.to_xml(context, instance, network_info, disk_info, image_meta, block_device_info=block_device_info, write_to_disk=True) self._create_domain_and_network(context, xml, instance, network_info, block_device_info) LOG.debug(_("Instance is running"), instance=instance) def _wait_for_boot(): """Called at an interval until the VM is running.""" state = self.get_info(instance)['state'] if state == power_state.RUNNING: LOG.info(_("Instance spawned successfully."), instance=instance) raise loopingcall.LoopingCallDone() timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot) timer.start(interval=0.5).wait() def _flush_libvirt_console(self, pty): out, err = utils.execute('dd', 'if=%s' % pty, 'iflag=nonblock', run_as_root=True, check_exit_code=False) return out def _append_to_file(self, data, fpath): LOG.info(_('data: %(data)r, fpath: %(fpath)r'), {'data': data, 'fpath': fpath}) fp = open(fpath, 'a+') fp.write(data) return fpath def get_console_output(self, context, instance): virt_dom = self._lookup_by_name(instance.name) xml = virt_dom.XMLDesc(0) tree = etree.fromstring(xml) console_types = {} # NOTE(comstud): We want to try 'file' types first, then try 'pty' # types. We can't use Python 2.7 syntax of: # tree.find("./devices/console[@type='file']/source") # because we need to support 2.6. 
console_nodes = tree.findall('./devices/console') for console_node in console_nodes: console_type = console_node.get('type') console_types.setdefault(console_type, []) console_types[console_type].append(console_node) # If the guest has a console logging to a file prefer to use that if console_types.get('file'): for file_console in console_types.get('file'): source_node = file_console.find('./source') if source_node is None: continue path = source_node.get("path") if not path: continue libvirt_utils.chown(path, os.getuid()) with libvirt_utils.file_open(path, 'rb') as fp: log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES) if remaining > 0: LOG.info(_('Truncated console log returned, %d bytes ' 'ignored'), remaining, instance=instance) return log_data # Try 'pty' types if console_types.get('pty'): for pty_console in console_types.get('pty'): source_node = pty_console.find('./source') if source_node is None: continue pty = source_node.get("path") if not pty: continue break else: msg = _("Guest does not have a console available") raise exception.NovaException(msg) self._chown_console_log_for_instance(instance) data = self._flush_libvirt_console(pty) console_log = self._get_console_log_path(instance) fpath = self._append_to_file(data, console_log) with libvirt_utils.file_open(fpath, 'rb') as fp: log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES) if remaining > 0: LOG.info(_('Truncated console log returned, %d bytes ignored'), remaining, instance=instance) return log_data @staticmethod def get_host_ip_addr(): return CONF.my_ip def get_vnc_console(self, context, instance): def get_vnc_port_for_instance(instance_name): virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) dom = xmlutils.safe_minidom_parse_string(xml) for graphic in dom.getElementsByTagName('graphics'): if graphic.getAttribute('type') == 'vnc': return graphic.getAttribute('port') # NOTE(rmk): We had VNC consoles enabled but the instance in # question is not actually listening for connections. raise exception.ConsoleTypeUnavailable(console_type='vnc') port = get_vnc_port_for_instance(instance.name) host = CONF.vncserver_proxyclient_address return {'host': host, 'port': port, 'internal_access_path': None} def get_spice_console(self, context, instance): def get_spice_ports_for_instance(instance_name): virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) # TODO(sleepsonthefloor): use etree instead of minidom dom = xmlutils.safe_minidom_parse_string(xml) for graphic in dom.getElementsByTagName('graphics'): if graphic.getAttribute('type') == 'spice': return (graphic.getAttribute('port'), graphic.getAttribute('tlsPort')) # NOTE(rmk): We had Spice consoles enabled but the instance in # question is not actually listening for connections. 
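            # For reference (descriptive comment only, port values
            # illustrative): the element inspected above looks roughly like
            #   <graphics type='spice' port='5901' tlsPort='5902' .../>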
            raise exception.ConsoleTypeUnavailable(console_type='spice')

        ports = get_spice_ports_for_instance(instance['name'])
        host = CONF.spice.server_proxyclient_address

        return {'host': host, 'port': ports[0],
                'tlsPort': ports[1], 'internal_access_path': None}

    @staticmethod
    def _supports_direct_io(dirpath):

        if not hasattr(os, 'O_DIRECT'):
            LOG.debug(_("This python runtime does not support direct I/O"))
            return False

        testfile = os.path.join(dirpath, ".directio.test")

        hasDirectIO = True
        try:
            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # Check whether the write is allowed with 512 byte alignment
            align_size = 512
            m = mmap.mmap(-1, align_size)
            m.write(r"x" * align_size)
            os.write(f, m)
            os.close(f)
            LOG.debug(_("Path '%(path)s' supports direct I/O") %
                      {'path': dirpath})
        except OSError as e:
            if e.errno == errno.EINVAL:
                LOG.debug(_("Path '%(path)s' does not support direct I/O: "
                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
                hasDirectIO = False
            else:
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Error on '%(path)s' while checking "
                                "direct I/O: '%(ex)s'") %
                              {'path': dirpath, 'ex': str(e)})
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error on '%(path)s' while checking direct I/O: "
                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
        finally:
            try:
                os.unlink(testfile)
            except Exception:
                pass

        return hasDirectIO

    @staticmethod
    def _create_local(target, local_size, unit='G',
                      fs_format=None, label=None):
        """Create a blank image of specified size."""

        libvirt_utils.create_image('raw', target,
                                   '%d%c' % (local_size, unit))

    def _create_ephemeral(self, target, ephemeral_size, fs_label, os_type,
                          is_block_dev=False, max_size=None):
        if not is_block_dev:
            self._create_local(target, ephemeral_size)

        # Run as root only for block devices.
        disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev)

    @staticmethod
    def _create_swap(target, swap_mb, max_size=None):
        """Create a swap file of specified size."""
        libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
        utils.mkfs('swap', target)

    @staticmethod
    def _get_console_log_path(instance):
        return os.path.join(libvirt_utils.get_instance_path(instance),
                            'console.log')

    @staticmethod
    def _get_disk_config_path(instance):
        return os.path.join(libvirt_utils.get_instance_path(instance),
                            'disk.config')

    def _chown_console_log_for_instance(self, instance):
        console_log = self._get_console_log_path(instance)
        if os.path.exists(console_log):
            libvirt_utils.chown(console_log, os.getuid())

    def _chown_disk_config_for_instance(self, instance):
        disk_config = self._get_disk_config_path(instance)
        if os.path.exists(disk_config):
            libvirt_utils.chown(disk_config, os.getuid())

    @staticmethod
    def _is_booted_from_volume(instance, disk_mapping):
        """Determines whether the VM is booting from volume

        Determines whether the disk mapping indicates that the VM
        is booting from a volume.
        """
        return ((not bool(instance.get('image_ref')))
                or 'disk' not in disk_mapping)

    def _inject_data(self, instance, network_info, admin_pass, files,
                     suffix):
        """Injects data into a disk image.

        Helper used for injecting data into a disk image file system.

        Keyword arguments:
          instance -- a dict describing the instance
          network_info -- a dict describing the network configuration
          admin_pass -- a string used to set an admin password
          files -- a list of files to be injected
          suffix -- a string used as an image name suffix
        """
        # Handle the partition that needs to be used.
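        # For reference (descriptive comment only): CONF.libvirt
        # .inject_partition is interpreted as -2 => injection disabled,
        # -1 => inspect the image for a partition, 0 => the image is not
        # partitioned, N > 0 => inject into partition N. The 0 case is
        # normalized to None just below.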
target_partition = None if not instance['kernel_id']: target_partition = CONF.libvirt.inject_partition if target_partition == 0: target_partition = None if CONF.libvirt.virt_type == 'lxc': target_partition = None # Handles the key injection. if CONF.libvirt.inject_key and instance.get('key_data'): key = str(instance['key_data']) else: key = None # Handles the admin password injection. if not CONF.libvirt.inject_password: admin_pass = None # Handles the network injection. net = netutils.get_injected_network_template(network_info) # Handles the metadata injection metadata = instance.get('metadata') image_type = CONF.libvirt.images_type if any((key, net, metadata, admin_pass, files)): injection_path = self.image_backend.image( instance, 'disk' + suffix, image_type).path img_id = instance['image_ref'] try: disk.inject_data(injection_path, key, net, metadata, admin_pass, files, partition=target_partition, use_cow=CONF.use_cow_images, mandatory=('files',)) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_('Error injecting data into image ' '%(img_id)s (%(e)s)'), {'img_id': img_id, 'e': e}, instance=instance) def _create_image(self, context, instance, disk_mapping, suffix='', disk_images=None, network_info=None, block_device_info=None, files=None, admin_pass=None, inject_files=True): if not suffix: suffix = '' booted_from_volume = self._is_booted_from_volume( instance, disk_mapping) def image(fname, image_type=CONF.libvirt.images_type): return self.image_backend.image(instance, fname + suffix, image_type) def raw(fname): return image(fname, image_type='raw') # ensure directories exist and are writable fileutils.ensure_tree(libvirt_utils.get_instance_path(instance)) LOG.info(_('Creating image'), instance=instance) # NOTE(dprince): for rescue console.log may already exist... chown it. self._chown_console_log_for_instance(instance) # NOTE(yaguang): For evacuate disk.config already exist in shared # storage, chown it. self._chown_disk_config_for_instance(instance) # NOTE(vish): No need add the suffix to console.log libvirt_utils.write_to_file( self._get_console_log_path(instance), '', 7) if not disk_images: disk_images = {'image_id': instance['image_ref'], 'kernel_id': instance['kernel_id'], 'ramdisk_id': instance['ramdisk_id']} if disk_images['kernel_id']: fname = imagecache.get_cache_fname(disk_images, 'kernel_id') raw('kernel').cache(fetch_func=libvirt_utils.fetch_image, context=context, filename=fname, image_id=disk_images['kernel_id'], user_id=instance['user_id'], project_id=instance['project_id']) if disk_images['ramdisk_id']: fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id') raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image, context=context, filename=fname, image_id=disk_images['ramdisk_id'], user_id=instance['user_id'], project_id=instance['project_id']) inst_type = flavors.extract_flavor(instance) # NOTE(ndipanov): Even if disk_mapping was passed in, which # currently happens only on rescue - we still don't want to # create a base image. 
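        # Illustration (descriptive comment only, path is hypothetical):
        # the cache filename computed by imagecache.get_cache_fname() is a
        # hash of the image id, so a cached kernel/ramdisk/root image ends
        # up backed by something like
        #   <instances_path>/_base/<hash-of-image-id>
        # shared by every instance booted from the same image on this host.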
if not booted_from_volume: root_fname = imagecache.get_cache_fname(disk_images, 'image_id') size = instance['root_gb'] * units.Gi if size == 0 or suffix == '.rescue': size = None image('disk').cache(fetch_func=libvirt_utils.fetch_image, context=context, filename=root_fname, size=size, image_id=disk_images['image_id'], user_id=instance['user_id'], project_id=instance['project_id']) # Lookup the filesystem type if required os_type_with_default = disk.get_fs_type_for_os_type( instance['os_type']) ephemeral_gb = instance['ephemeral_gb'] if 'disk.local' in disk_mapping: disk_image = image('disk.local') fn = functools.partial(self._create_ephemeral, fs_label='ephemeral0', os_type=instance["os_type"], is_block_dev=disk_image.is_block_dev) fname = "ephemeral_%s_%s" % (ephemeral_gb, os_type_with_default) size = ephemeral_gb * units.Gi disk_image.cache(fetch_func=fn, filename=fname, size=size, ephemeral_size=ephemeral_gb) for idx, eph in enumerate(driver.block_device_info_get_ephemerals( block_device_info)): disk_image = image(blockinfo.get_eph_disk(idx)) fn = functools.partial(self._create_ephemeral, fs_label='ephemeral%d' % idx, os_type=instance["os_type"], is_block_dev=disk_image.is_block_dev) size = eph['size'] * units.Gi fname = "ephemeral_%s_%s" % (eph['size'], os_type_with_default) disk_image.cache( fetch_func=fn, filename=fname, size=size, ephemeral_size=eph['size']) if 'disk.swap' in disk_mapping: mapping = disk_mapping['disk.swap'] swap_mb = 0 swap = driver.block_device_info_get_swap(block_device_info) if driver.swap_is_usable(swap): swap_mb = swap['swap_size'] elif (inst_type['swap'] > 0 and not block_device.volume_in_mapping( mapping['dev'], block_device_info)): swap_mb = inst_type['swap'] if swap_mb > 0: size = swap_mb * units.Mi image('disk.swap').cache(fetch_func=self._create_swap, filename="swap_%s" % swap_mb, size=size, swap_mb=swap_mb) # Config drive if configdrive.required_by(instance): LOG.info(_('Using config drive'), instance=instance) extra_md = {} if admin_pass: extra_md['admin_pass'] = admin_pass inst_md = instance_metadata.InstanceMetadata(instance, content=files, extra_md=extra_md, network_info=network_info) with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: configdrive_path = self._get_disk_config_path(instance) LOG.info(_('Creating config drive at %(path)s'), {'path': configdrive_path}, instance=instance) try: cdb.make_drive(configdrive_path) except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception(): LOG.error(_('Creating config drive failed ' 'with error: %s'), e, instance=instance) # File injection only if needed elif inject_files and CONF.libvirt.inject_partition != -2: if booted_from_volume: LOG.warn(_('File injection into a boot from volume ' 'instance is not supported'), instance=instance) self._inject_data( instance, network_info, admin_pass, files, suffix) if CONF.libvirt.virt_type == 'uml': libvirt_utils.chown(image('disk').path, 'root') def _prepare_pci_devices_for_use(self, pci_devices): # kvm , qemu support managed mode # In managed mode, the configured device will be automatically # detached from the host OS drivers when the guest is started, # and then re-attached when the guest shuts down. if CONF.libvirt.virt_type != 'xen': # we do manual detach only for xen return try: for dev in pci_devices: libvirt_dev_addr = dev['hypervisor_name'] libvirt_dev = \ self._conn.nodeDeviceLookupByName(libvirt_dev_addr) # Note(yjiang5) Spelling for 'dettach' is correct, see # http://libvirt.org/html/libvirt-libvirt.html. 
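            # Sketch (descriptive comment only): for virt_type 'xen' each
            # assigned PCI device is detached from its host driver here and
            # reset in a second loop below; kvm/qemu return early above
            # because libvirt's managed mode ('managed=yes') performs both
            # steps automatically.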
libvirt_dev.dettach() # Note(yjiang5): A reset of one PCI device may impact other # devices on the same bus, thus we need two separated loops # to detach and then reset it. for dev in pci_devices: libvirt_dev_addr = dev['hypervisor_name'] libvirt_dev = \ self._conn.nodeDeviceLookupByName(libvirt_dev_addr) libvirt_dev.reset() except libvirt.libvirtError as exc: raise exception.PciDevicePrepareFailed(id=dev['id'], instance_uuid= dev['instance_uuid'], reason=str(exc)) def _detach_pci_devices(self, dom, pci_devs): # for libvirt version < 1.1.1, this is race condition # so forbid detach if not had this version if not self.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION): if pci_devs: reason = (_("Detaching PCI devices with libvirt < %(ver)s" " is not permitted") % {'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION}) raise exception.PciDeviceDetachFailed(reason=reason, dev=pci_devs) try: for dev in pci_devs: dom.detachDeviceFlags(self.get_guest_pci_device(dev).to_xml(), libvirt.VIR_DOMAIN_AFFECT_LIVE) # after detachDeviceFlags returned, we should check the dom to # ensure the detaching is finished xml = dom.XMLDesc(0) xml_doc = etree.fromstring(xml) guest_config = vconfig.LibvirtConfigGuest() guest_config.parse_dom(xml_doc) for hdev in [d for d in guest_config.devices if d.type == 'pci']: hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function] dbsf = pci_utils.parse_address(dev['address']) if [int(x, 16) for x in hdbsf] ==\ [int(x, 16) for x in dbsf]: raise exception.PciDeviceDetachFailed(reason= "timeout", dev=dev) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: LOG.warn(_("Instance disappeared while detaching " "a PCI device from it.")) else: raise def _attach_pci_devices(self, dom, pci_devs): try: for dev in pci_devs: dom.attachDevice(self.get_guest_pci_device(dev).to_xml()) except libvirt.libvirtError: LOG.error(_('Attaching PCI devices %(dev)s to %(dom)s failed.') % {'dev': pci_devs, 'dom': dom.ID()}) raise def _set_host_enabled(self, enabled, disable_reason=DISABLE_REASON_UNDEFINED): """Enables / Disables the compute service on this host. This doesn't override non-automatic disablement with an automatic setting; thereby permitting operators to keep otherwise healthy hosts out of rotation. """ status_name = {True: 'disabled', False: 'enabled'} disable_service = not enabled ctx = nova_context.get_admin_context() try: service = service_obj.Service.get_by_compute_host(ctx, CONF.host) if service.disabled != disable_service: # Note(jang): this is a quick fix to stop operator- # disabled compute hosts from re-enabling themselves # automatically. We prefix any automatic reason code # with a fixed string. We only re-enable a host # automatically if we find that string in place. # This should probably be replaced with a separate flag. 
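                # Illustration (descriptive comment only, reason text
                # hypothetical): an automatically disabled host carries a
                # disabled_reason such as
                #   DISABLE_PREFIX + 'connection to libvirt lost'
                # and only reasons starting with that prefix are ever
                # re-enabled automatically by this method.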
if not service.disabled or ( service.disabled_reason and service.disabled_reason.startswith(DISABLE_PREFIX)): service.disabled = disable_service service.disabled_reason = ( DISABLE_PREFIX + disable_reason if disable_service else DISABLE_REASON_UNDEFINED) service.save() LOG.debug(_('Updating compute service status to %s'), status_name[disable_service]) else: LOG.debug(_('Not overriding manual compute service ' 'status with: %s'), status_name[disable_service]) except exception.ComputeHostNotFound: LOG.warn(_('Cannot update service status on host: %s,' 'since it is not registered.') % CONF.host) except Exception: LOG.warn(_('Cannot update service status on host: %s,' 'due to an unexpected exception.') % CONF.host, exc_info=True) def get_host_capabilities(self): """Returns an instance of config.LibvirtConfigCaps representing the capabilities of the host. """ if not self._caps: xmlstr = self._conn.getCapabilities() self._caps = vconfig.LibvirtConfigCaps() self._caps.parse_str(xmlstr) if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'): try: features = self._conn.baselineCPU( [self._caps.host.cpu.to_xml()], libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES) # FIXME(wangpan): the return value of baselineCPU should be # None or xml string, but libvirt has a bug # of it from 1.1.2 which is fixed in 1.2.0, # this -1 checking should be removed later. if features and features != -1: self._caps.host.cpu.parse_str(features) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_SUPPORT: LOG.warn(_LW("URI %(uri)s does not support full set" " of host capabilities: " "%(error)s"), {'uri': self.uri(), 'error': ex}) else: raise return self._caps def get_host_uuid(self): """Returns a UUID representing the host.""" caps = self.get_host_capabilities() return caps.host.uuid def get_host_cpu_for_guest(self): """Returns an instance of config.LibvirtConfigGuestCPU representing the host's CPU model & topology with policy for configuring a guest to match """ caps = self.get_host_capabilities() hostcpu = caps.host.cpu guestcpu = vconfig.LibvirtConfigGuestCPU() guestcpu.model = hostcpu.model guestcpu.vendor = hostcpu.vendor guestcpu.arch = hostcpu.arch guestcpu.match = "exact" for hostfeat in hostcpu.features: guestfeat = vconfig.LibvirtConfigGuestCPUFeature(hostfeat.name) guestfeat.policy = "require" guestcpu.add_feature(guestfeat) return guestcpu def get_guest_cpu_config(self): mode = CONF.libvirt.cpu_mode model = CONF.libvirt.cpu_model if mode is None: if ((CONF.libvirt.virt_type == "kvm" or CONF.libvirt.virt_type == "qemu")): mode = "host-model" else: mode = "none" if mode == "none": return None if ((CONF.libvirt.virt_type != "kvm" and CONF.libvirt.virt_type != "qemu")): msg = _("Config requested an explicit CPU model, but " "the current libvirt hypervisor '%s' does not " "support selecting CPU models") % CONF.libvirt.virt_type raise exception.Invalid(msg) if mode == "custom" and model is None: msg = _("Config requested a custom CPU model, but no " "model name was provided") raise exception.Invalid(msg) elif mode != "custom" and model is not None: msg = _("A CPU model name should not be set when a " "host CPU model is requested") raise exception.Invalid(msg) LOG.debug(_("CPU mode '%(mode)s' model '%(model)s' was chosen") % {'mode': mode, 'model': (model or "")}) # TODO(berrange): in the future, when MIN_LIBVIRT_VERSION is # updated to be at least this new, we can kill off the elif # blocks here if self.has_min_version(MIN_LIBVIRT_HOST_CPU_VERSION): cpu = 
vconfig.LibvirtConfigGuestCPU() cpu.mode = mode cpu.model = model elif mode == "custom": cpu = vconfig.LibvirtConfigGuestCPU() cpu.model = model elif mode == "host-model": cpu = self.get_host_cpu_for_guest() elif mode == "host-passthrough": msg = _("Passthrough of the host CPU was requested but " "this libvirt version does not support this feature") raise exception.NovaException(msg) return cpu def get_guest_disk_config(self, instance, name, disk_mapping, inst_type, image_type=None): image = self.image_backend.image(instance, name, image_type) disk_info = disk_mapping[name] return image.libvirt_info(disk_info['bus'], disk_info['dev'], disk_info['type'], self.disk_cachemode, inst_type['extra_specs'], self.get_hypervisor_version()) def get_guest_storage_config(self, instance, image_meta, disk_info, rescue, block_device_info, inst_type): devices = [] disk_mapping = disk_info['mapping'] block_device_mapping = driver.block_device_info_get_mapping( block_device_info) if CONF.libvirt.virt_type == "lxc": fs = vconfig.LibvirtConfigGuestFilesys() fs.source_type = "mount" fs.source_dir = os.path.join( libvirt_utils.get_instance_path(instance), 'rootfs') devices.append(fs) else: if rescue: diskrescue = self.get_guest_disk_config(instance, 'disk.rescue', disk_mapping, inst_type) devices.append(diskrescue) diskos = self.get_guest_disk_config(instance, 'disk', disk_mapping, inst_type) devices.append(diskos) else: if 'disk' in disk_mapping: diskos = self.get_guest_disk_config(instance, 'disk', disk_mapping, inst_type) devices.append(diskos) if 'disk.local' in disk_mapping: disklocal = self.get_guest_disk_config(instance, 'disk.local', disk_mapping, inst_type) devices.append(disklocal) self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'default_ephemeral_device': block_device.prepend_dev(disklocal.target_dev)}) for idx, eph in enumerate( driver.block_device_info_get_ephemerals( block_device_info)): diskeph = self.get_guest_disk_config( instance, blockinfo.get_eph_disk(idx), disk_mapping, inst_type) devices.append(diskeph) if 'disk.swap' in disk_mapping: diskswap = self.get_guest_disk_config(instance, 'disk.swap', disk_mapping, inst_type) devices.append(diskswap) self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'default_swap_device': block_device.prepend_dev( diskswap.target_dev)}) for vol in block_device_mapping: connection_info = vol['connection_info'] vol_dev = block_device.prepend_dev(vol['mount_device']) info = disk_mapping[vol_dev] cfg = self.volume_driver_method('connect_volume', connection_info, info) devices.append(cfg) vol['connection_info'] = connection_info vol.save(nova_context.get_admin_context()) if 'disk.config' in disk_mapping: diskconfig = self.get_guest_disk_config(instance, 'disk.config', disk_mapping, inst_type, 'raw') devices.append(diskconfig) for d in devices: self.set_cache_mode(d) if (image_meta and image_meta.get('properties', {}).get('hw_scsi_model')): hw_scsi_model = image_meta['properties']['hw_scsi_model'] scsi_controller = vconfig.LibvirtConfigGuestController() scsi_controller.type = 'scsi' scsi_controller.model = hw_scsi_model devices.append(scsi_controller) return devices def get_guest_config_sysinfo(self, instance): sysinfo = vconfig.LibvirtConfigGuestSysinfo() sysinfo.system_manufacturer = version.vendor_string() sysinfo.system_product = version.product_string() sysinfo.system_version = version.version_string_with_package() sysinfo.system_serial = self.get_host_uuid() sysinfo.system_uuid = instance['uuid'] 
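        # The resulting XML maps onto guest SMBIOS fields roughly like this
        # (descriptive comment only, entry values illustrative):
        #   <sysinfo type='smbios'>
        #     <system>
        #       <entry name='manufacturer'>...vendor string...</entry>
        #       <entry name='serial'>...host uuid...</entry>
        #       <entry name='uuid'>...instance uuid...</entry>
        #     </system>
        #   </sysinfo>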
return sysinfo def get_guest_pci_device(self, pci_device): dbsf = pci_utils.parse_address(pci_device['address']) dev = vconfig.LibvirtConfigGuestHostdevPCI() dev.domain, dev.bus, dev.slot, dev.function = dbsf # only kvm support managed mode if CONF.libvirt.virt_type in ('xen',): dev.managed = 'no' if CONF.libvirt.virt_type in ('kvm', 'qemu'): dev.managed = 'yes' return dev def get_guest_config(self, instance, network_info, image_meta, disk_info, rescue=None, block_device_info=None): """Get config data for parameters. :param rescue: optional dictionary that should contain the key 'ramdisk_id' if a ramdisk is needed for the rescue image and 'kernel_id' if a kernel is needed for the rescue image. """ flavor = flavor_obj.Flavor.get_by_id( nova_context.get_admin_context(read_deleted='yes'), instance['instance_type_id']) inst_path = libvirt_utils.get_instance_path(instance) disk_mapping = disk_info['mapping'] img_meta_prop = image_meta.get('properties', {}) if image_meta else {} CONSOLE = "console=tty0 console=ttyS0" guest = vconfig.LibvirtConfigGuest() guest.virt_type = CONF.libvirt.virt_type guest.name = instance['name'] guest.uuid = instance['uuid'] # We are using default unit for memory: KiB guest.memory = flavor.memory_mb * units.Ki guest.vcpus = flavor.vcpus guest.cpuset = CONF.vcpu_pin_set quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota'] for key, value in flavor.extra_specs.iteritems(): scope = key.split(':') if len(scope) > 1 and scope[0] == 'quota': if scope[1] in quota_items: setattr(guest, scope[1], value) guest.cpu = self.get_guest_cpu_config() if 'root' in disk_mapping: root_device_name = block_device.prepend_dev( disk_mapping['root']['dev']) else: root_device_name = None if root_device_name: # NOTE(yamahata): # for nova.api.ec2.cloud.CloudController.get_metadata() self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'root_device_name': root_device_name}) guest.os_type = vm_mode.get_from_instance(instance) if guest.os_type is None: if CONF.libvirt.virt_type == "lxc": guest.os_type = vm_mode.EXE elif CONF.libvirt.virt_type == "uml": guest.os_type = vm_mode.UML elif CONF.libvirt.virt_type == "xen": guest.os_type = vm_mode.XEN else: guest.os_type = vm_mode.HVM if CONF.libvirt.virt_type == "xen" and guest.os_type == vm_mode.HVM: guest.os_loader = CONF.libvirt.xen_hvmloader_path if CONF.libvirt.virt_type in ("kvm", "qemu"): caps = self.get_host_capabilities() if caps.host.cpu.arch in ("i686", "x86_64"): guest.sysinfo = self.get_guest_config_sysinfo(instance) guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS() # The underlying machine type can be set as an image attribute, # or otherwise based on some architecture specific defaults if (image_meta is not None and image_meta.get('properties') and image_meta['properties'].get('hw_machine_type') is not None): guest.os_mach_type = \ image_meta['properties']['hw_machine_type'] else: # For ARM systems we will default to vexpress-a15 for armv7 # and virt for aarch64 if caps.host.cpu.arch == "armv7l": guest.os_mach_type = "vexpress-a15" if caps.host.cpu.arch == "aarch64": guest.os_mach_type = "virt" if CONF.libvirt.virt_type == "lxc": guest.os_init_path = "/sbin/init" guest.os_cmdline = CONSOLE elif CONF.libvirt.virt_type == "uml": guest.os_kernel = "/usr/bin/linux" guest.os_root = root_device_name else: if rescue: if rescue.get('kernel_id'): guest.os_kernel = os.path.join(inst_path, "kernel.rescue") if CONF.libvirt.virt_type == "xen": guest.os_cmdline = "ro root=%s" % root_device_name else: guest.os_cmdline = 
("root=%s %s" % (root_device_name, CONSOLE)) if CONF.libvirt.virt_type == "qemu": guest.os_cmdline += " no_timer_check" if rescue.get('ramdisk_id'): guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue") elif instance['kernel_id']: guest.os_kernel = os.path.join(inst_path, "kernel") if CONF.libvirt.virt_type == "xen": guest.os_cmdline = "ro root=%s" % root_device_name else: guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE)) if CONF.libvirt.virt_type == "qemu": guest.os_cmdline += " no_timer_check" if instance['ramdisk_id']: guest.os_initrd = os.path.join(inst_path, "ramdisk") else: guest.os_boot_dev = blockinfo.get_boot_order(disk_info) if (image_meta and image_meta.get('properties', {}).get('os_command_line')): guest.os_cmdline = \ image_meta['properties'].get('os_command_line') if ((CONF.libvirt.virt_type != "lxc" and CONF.libvirt.virt_type != "uml")): guest.acpi = True guest.apic = True # NOTE(mikal): Microsoft Windows expects the clock to be in # "localtime". If the clock is set to UTC, then you can use a # registry key to let windows know, but Microsoft says this is # buggy in http://support.microsoft.com/kb/2687252 clk = vconfig.LibvirtConfigGuestClock() if instance['os_type'] == 'windows': LOG.info(_('Configuring timezone for windows instance to ' 'localtime'), instance=instance) clk.offset = 'localtime' else: clk.offset = 'utc' guest.set_clock(clk) if CONF.libvirt.virt_type == "kvm": # TODO(berrange) One day this should be per-guest # OS type configurable tmpit = vconfig.LibvirtConfigGuestTimer() tmpit.name = "pit" tmpit.tickpolicy = "delay" tmrtc = vconfig.LibvirtConfigGuestTimer() tmrtc.name = "rtc" tmrtc.tickpolicy = "catchup" clk.add_timer(tmpit) clk.add_timer(tmrtc) arch = libvirt_utils.get_arch(image_meta) if arch in ("i686", "x86_64"): # NOTE(rfolco): HPET is a hardware timer for x86 arch. # qemu -no-hpet is not supported on non-x86 targets. tmhpet = vconfig.LibvirtConfigGuestTimer() tmhpet.name = "hpet" tmhpet.present = False clk.add_timer(tmhpet) for cfg in self.get_guest_storage_config(instance, image_meta, disk_info, rescue, block_device_info, flavor): guest.add_device(cfg) for vif in network_info: cfg = self.vif_driver.get_config(instance, vif, image_meta, flavor) guest.add_device(cfg) if ((CONF.libvirt.virt_type == "qemu" or CONF.libvirt.virt_type == "kvm")): # The QEMU 'pty' driver throws away any data if no # client app is connected. Thus we can't get away # with a single type=pty console. Instead we have # to configure two separate consoles. consolelog = vconfig.LibvirtConfigGuestSerial() consolelog.type = "file" consolelog.source_path = self._get_console_log_path(instance) guest.add_device(consolelog) consolepty = vconfig.LibvirtConfigGuestSerial() consolepty.type = "pty" guest.add_device(consolepty) else: consolepty = vconfig.LibvirtConfigGuestConsole() consolepty.type = "pty" guest.add_device(consolepty) # We want a tablet if VNC is enabled, # or SPICE is enabled and the SPICE agent is disabled # NB: this implies that if both SPICE + VNC are enabled # at the same time, we'll get the tablet whether the # SPICE agent is used or not. 
        need_usb_tablet = False
        if CONF.vnc_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet
        elif CONF.spice.enabled and not CONF.spice.agent_enabled:
            need_usb_tablet = CONF.libvirt.use_usb_tablet

        if need_usb_tablet and guest.os_type == vm_mode.HVM:
            tablet = vconfig.LibvirtConfigGuestInput()
            tablet.type = "tablet"
            tablet.bus = "usb"
            guest.add_device(tablet)

        if CONF.spice.enabled and CONF.spice.agent_enabled and \
                CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
            channel = vconfig.LibvirtConfigGuestChannel()
            channel.target_name = "com.redhat.spice.0"
            guest.add_device(channel)

        # NB some versions of libvirt support both SPICE and VNC
        # at the same time. We're not trying to second guess which
        # those versions are. We'll just let libvirt report the
        # errors appropriately if the user enables both.
        add_video_driver = False
        if ((CONF.vnc_enabled and
             CONF.libvirt.virt_type not in ('lxc', 'uml'))):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "vnc"
            graphics.keymap = CONF.vnc_keymap
            graphics.listen = CONF.vncserver_listen
            guest.add_device(graphics)
            add_video_driver = True

        if CONF.spice.enabled and \
                CONF.libvirt.virt_type not in ('lxc', 'uml', 'xen'):
            graphics = vconfig.LibvirtConfigGuestGraphics()
            graphics.type = "spice"
            graphics.keymap = CONF.spice.keymap
            graphics.listen = CONF.spice.server_listen
            guest.add_device(graphics)
            add_video_driver = True

        if add_video_driver:
            VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
            video = vconfig.LibvirtConfigGuestVideo()
            # NOTE(ldbragst): The following logic sets the video.type
            # depending on supported defaults given the architecture,
            # virtualization type, and features. The video.type attribute can
            # be overridden by the user with image_meta['properties'], which
            # is carried out in the next if statement below this one.
            arch = libvirt_utils.get_arch(image_meta)
            if guest.os_type == vm_mode.XEN:
                video.type = 'xen'
            elif arch in ('ppc', 'ppc64'):
                # NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by
                # default, so use 'vga' instead when running on Power
                # hardware.
video.type = 'vga' elif CONF.spice.enabled: video.type = 'qxl' if img_meta_prop.get('hw_video_model'): video.type = img_meta_prop.get('hw_video_model') if (video.type not in VALID_VIDEO_DEVICES): raise exception.InvalidVideoMode(model=video.type) # Set video memory, only if the flavor's limit is set video_ram = int(img_meta_prop.get('hw_video_ram', 0)) max_vram = int(flavor.extra_specs .get('hw_video:ram_max_mb', 0)) if video_ram > max_vram: raise exception.RequestedVRamTooHigh(req_vram=video_ram, max_vram=max_vram) if max_vram and video_ram: video.vram = video_ram guest.add_device(video) # Qemu guest agent only support 'qemu' and 'kvm' hypervisor if CONF.libvirt.virt_type in ('qemu', 'kvm'): qga_enabled = False # Enable qga only if the 'hw_qemu_guest_agent' is equal to yes hw_qga = img_meta_prop.get('hw_qemu_guest_agent', 'no') if hw_qga.lower() == 'yes': LOG.debug(_("Qemu guest agent is enabled through image " "metadata"), instance=instance) qga_enabled = True if qga_enabled: qga = vconfig.LibvirtConfigGuestChannel() qga.type = "unix" qga.target_name = "org.qemu.guest_agent.0" qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" % ("org.qemu.guest_agent.0", instance['name'])) guest.add_device(qga) if (img_meta_prop.get('hw_rng_model') == 'virtio' and flavor.extra_specs.get('hw_rng:allowed', '').lower() == 'true'): rng_device = vconfig.LibvirtConfigGuestRng() rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0) period = flavor.extra_specs.get('hw_rng:rate_period', 0) if rate_bytes: rng_device.rate_bytes = int(rate_bytes) rng_device.rate_period = int(period) if (CONF.libvirt.rng_dev_path and not os.path.exists(CONF.libvirt.rng_dev_path)): raise exception.RngDeviceNotExist( path=CONF.libvirt.rng_dev_path) rng_device.backend = CONF.libvirt.rng_dev_path guest.add_device(rng_device) if CONF.libvirt.virt_type in ('xen', 'qemu', 'kvm'): for pci_dev in pci_manager.get_instance_pci_devs(instance): guest.add_device(self.get_guest_pci_device(pci_dev)) else: if len(pci_manager.get_instance_pci_devs(instance)) > 0: raise exception.PciDeviceUnsupportedHypervisor( type=CONF.libvirt.virt_type) if 'hw_watchdog_action' in flavor.extra_specs: LOG.warn(_LW('Old property name "hw_watchdog_action" is now ' 'deprecated and will be removed in L release. ' 'Use updated property name ' '"hw:watchdog_action" instead')) # TODO(pkholkin): accepting old property name 'hw_watchdog_action' # should be removed in L release watchdog_action = (flavor.extra_specs.get('hw_watchdog_action') or flavor.extra_specs.get('hw:watchdog_action') or 'disabled') if (image_meta is not None and image_meta.get('properties', {}).get('hw_watchdog_action')): watchdog_action = image_meta['properties']['hw_watchdog_action'] # NB(sross): currently only actually supported by KVM/QEmu if watchdog_action != 'disabled': if watchdog_actions.is_valid_watchdog_action(watchdog_action): bark = vconfig.LibvirtConfigGuestWatchdog() bark.action = watchdog_action guest.add_device(bark) else: raise exception.InvalidWatchdogAction(action=watchdog_action) return guest def to_xml(self, context, instance, network_info, disk_info, image_meta=None, rescue=None, block_device_info=None, write_to_disk=False): # We should get image metadata every time for generating xml if image_meta is None: (image_service, image_id) = glance.get_remote_image_service( context, instance['image_ref']) image_meta = compute_utils.get_image_metadata( context, image_service, image_id, instance) # NOTE(danms): Stringifying a NetworkInfo will take a lock. 
Do # this ahead of time so that we don't acquire it while also # holding the logging lock. network_info_str = str(network_info) msg = ('Start to_xml ' 'network_info=%(network_info)s ' 'disk_info=%(disk_info)s ' 'image_meta=%(image_meta)s rescue=%(rescue)s ' 'block_device_info=%(block_device_info)s' % {'network_info': network_info_str, 'disk_info': disk_info, 'image_meta': image_meta, 'rescue': rescue, 'block_device_info': block_device_info}) # NOTE(mriedem): block_device_info can contain auth_password so we # need to sanitize the password in the message. LOG.debug(logging.mask_password(msg), instance=instance) conf = self.get_guest_config(instance, network_info, image_meta, disk_info, rescue, block_device_info) xml = conf.to_xml() if write_to_disk: instance_dir = libvirt_utils.get_instance_path(instance) xml_path = os.path.join(instance_dir, 'libvirt.xml') libvirt_utils.write_to_file(xml_path, xml) LOG.debug(_('End to_xml xml=%(xml)s'), {'xml': xml}, instance=instance) return xml def _lookup_by_id(self, instance_id): """Retrieve libvirt domain object given an instance id. All libvirt error handling should be handled in this method and relevant nova exceptions should be raised in response. """ try: return self._conn.lookupByID(instance_id) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: raise exception.InstanceNotFound(instance_id=instance_id) msg = (_("Error from libvirt while looking up %(instance_id)s: " "[Error Code %(error_code)s] %(ex)s") % {'instance_id': instance_id, 'error_code': error_code, 'ex': ex}) raise exception.NovaException(msg) def _lookup_by_name(self, instance_name): """Retrieve libvirt domain object given an instance name. All libvirt error handling should be handled in this method and relevant nova exceptions should be raised in response. """ try: return self._conn.lookupByName(instance_name) except libvirt.libvirtError as ex: error_code = ex.get_error_code() if error_code == libvirt.VIR_ERR_NO_DOMAIN: raise exception.InstanceNotFound(instance_id=instance_name) msg = (_('Error from libvirt while looking up %(instance_name)s: ' '[Error Code %(error_code)s] %(ex)s') % {'instance_name': instance_name, 'error_code': error_code, 'ex': ex}) raise exception.NovaException(msg) def get_info(self, instance): """Retrieve information from libvirt for a specific instance name. If a libvirt error is encountered during lookup, we might raise a NotFound exception or Error exception depending on how severe the libvirt error is. """ virt_dom = self._lookup_by_name(instance['name']) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': LIBVIRT_POWER_STATE[state], 'max_mem': max_mem, 'mem': mem, 'num_cpu': num_cpu, 'cpu_time': cpu_time, 'id': virt_dom.ID()} def _create_domain(self, xml=None, domain=None, instance=None, launch_flags=0, power_on=True): """Create a domain. Either domain or xml must be passed in. If both are passed, then the domain definition is overwritten from the xml. 
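:returns: the libvirt domain object that was defined and (when
          power_on is True) launched.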
""" inst_path = None if instance: inst_path = libvirt_utils.get_instance_path(instance) if CONF.libvirt.virt_type == 'lxc': if not inst_path: inst_path = None container_dir = os.path.join(inst_path, 'rootfs') fileutils.ensure_tree(container_dir) image = self.image_backend.image(instance, 'disk') container_root_device = disk.setup_container(image.path, container_dir=container_dir, use_cow=CONF.use_cow_images) #Note(GuanQiang): save container root device name here, used for # detaching the linked image device when deleting # the lxc instance. if container_root_device: self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'root_device_name': container_root_device}) if xml: try: domain = self._conn.defineXML(xml) except Exception as e: LOG.error(_("An error occurred while trying to define a domain" " with xml: %s") % xml) raise e if power_on: try: domain.createWithFlags(launch_flags) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_("An error occurred while trying to launch a " "defined domain with xml: %s") % domain.XMLDesc(0)) if not utils.is_neutron(): try: self._enable_hairpin(domain.XMLDesc(0)) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_("An error occurred while enabling hairpin " "mode on domain with xml: %s") % domain.XMLDesc(0)) # NOTE(uni): Now the container is running with its own private mount # namespace and so there is no need to keep the container rootfs # mounted in the host namespace if CONF.libvirt.virt_type == 'lxc': state = self.get_info(instance)['state'] container_dir = os.path.join(inst_path, 'rootfs') if state == power_state.RUNNING: disk.clean_lxc_namespace(container_dir=container_dir) else: disk.teardown_container(container_dir=container_dir) return domain def _neutron_failed_callback(self, event_name, instance): LOG.error(_('Neutron Reported failure on event ' '%(event)s for instance %(uuid)s'), {'event': event_name, 'uuid': instance.uuid}) if CONF.vif_plugging_is_fatal: raise exception.VirtualInterfaceCreateException() def _get_neutron_events(self, network_info): # NOTE(danms): We need to collect any VIFs that are currently # down that we expect a down->up event for. Anything that is # already up will not undergo that transition, and for # anything that might be stale (cache-wise) assume it's # already up so we don't block on it. 
return [('network-vif-plugged', vif['id']) for vif in network_info if vif.get('active', True) is False] @staticmethod def _conn_supports_start_paused(): return CONF.libvirt.virt_type in ('kvm', 'qemu') def _create_domain_and_network(self, context, xml, instance, network_info, block_device_info=None, power_on=True, reboot=False, vifs_already_plugged=False): """Do required network setup and create domain.""" block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_info = blockinfo.get_info_from_bdm( CONF.libvirt.virt_type, vol) conf = self.volume_driver_method('connect_volume', connection_info, disk_info) # cache device_path in connection_info -- required by encryptors if 'data' in connection_info: connection_info['data']['device_path'] = conf.source_path vol['connection_info'] = connection_info vol.save(context) if (not reboot and 'data' in connection_info and 'volume_id' in connection_info['data']): volume_id = connection_info['data']['volume_id'] encryption = encryptors.get_encryption_metadata( context, self._volume_api, volume_id, connection_info) if encryption: encryptor = self._get_volume_encryptor(connection_info, encryption) encryptor.attach_volume(context, **encryption) timeout = CONF.vif_plugging_timeout if (self._conn_supports_start_paused() and utils.is_neutron() and not vifs_already_plugged and power_on and timeout): events = self._get_neutron_events(network_info) else: events = [] launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0 domain = None try: with self.virtapi.wait_for_instance_event( instance, events, deadline=timeout, error_callback=self._neutron_failed_callback): self.plug_vifs(instance, network_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) domain = self._create_domain( xml, instance=instance, launch_flags=launch_flags, power_on=power_on) self.firewall_driver.apply_instance_filter(instance, network_info) except exception.VirtualInterfaceCreateException: # Neutron reported failure and we didn't swallow it, so # bail here with excutils.save_and_reraise_exception(): if domain: domain.destroy() self.cleanup(context, instance, network_info=network_info, block_device_info=block_device_info) except eventlet.timeout.Timeout: # We never heard from Neutron LOG.warn(_('Timeout waiting for vif plugging callback for ' 'instance %(uuid)s'), {'uuid': instance['uuid']}) if CONF.vif_plugging_is_fatal: if domain: domain.destroy() self.cleanup(context, instance, network_info=network_info, block_device_info=block_device_info) raise exception.VirtualInterfaceCreateException() # Resume only if domain has been paused if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED: domain.resume() return domain def get_all_block_devices(self): """Return all block devices in use on this node.""" devices = [] for dom_id in self.list_instance_ids(): try: domain = self._lookup_by_id(dom_id) doc = etree.fromstring(domain.XMLDesc(0)) except exception.InstanceNotFound: LOG.info(_("libvirt can't find a domain with id: %s") % dom_id) continue except Exception: continue ret = doc.findall('./devices/disk') for node in ret: if node.get('type') != 'block': continue for child in node.getchildren(): if child.tag == 'source': devices.append(child.get('dev')) return devices def get_disks(self, instance_name): """Note that this function takes an instance name. Returns a list of all block devices for this domain. 
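For example (illustrative): ['vda', 'vdb'].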
""" domain = self._lookup_by_name(instance_name) xml = domain.XMLDesc(0) try: doc = etree.fromstring(xml) except Exception: return [] return filter(bool, [target.get("dev") for target in doc.findall('devices/disk/target')]) def get_interfaces(self, xml): """Note that this function takes a domain xml. Returns a list of all network interfaces for this instance. """ doc = None try: doc = etree.fromstring(xml) except Exception: return [] interfaces = [] ret = doc.findall('./devices/interface') for node in ret: devdst = None for child in list(node): if child.tag == 'target': devdst = child.attrib['dev'] if devdst is None: continue interfaces.append(devdst) return interfaces def get_vcpu_total(self): """Get available vcpu number of physical computer. :returns: the number of cpu core instances can be used. """ if self._vcpu_total != 0: return self._vcpu_total try: total_pcpus = self._conn.getInfo()[2] except libvirt.libvirtError: LOG.warn(_("Cannot get the number of cpu, because this " "function is not implemented for this platform. ")) return 0 if CONF.vcpu_pin_set is None: self._vcpu_total = total_pcpus return self._vcpu_total available_ids = cpu.get_cpuset_ids() if available_ids[-1] >= total_pcpus: raise exception.Invalid(_("Invalid vcpu_pin_set config, " "out of hypervisor cpu range.")) self._vcpu_total = len(available_ids) return self._vcpu_total def get_memory_mb_total(self): """Get the total memory size(MB) of physical computer. :returns: the total amount of memory(MB). """ return self._conn.getInfo()[1] @staticmethod def get_local_gb_info(): """Get local storage info of the compute node in GB. :returns: A dict containing: :total: How big the overall usable filesystem is (in gigabytes) :free: How much space is free (in gigabytes) :used: How much space is used (in gigabytes) """ if CONF.libvirt.images_type == 'lvm': info = libvirt_utils.get_volume_group_info( CONF.libvirt.images_volume_group) else: info = libvirt_utils.get_fs_info(CONF.instances_path) for (k, v) in info.iteritems(): info[k] = v / units.Gi return info def get_vcpu_used(self): """Get vcpu usage number of physical computer. :returns: The total number of vcpu(s) that are currently being used. """ total = 0 if CONF.libvirt.virt_type == 'lxc': return total + 1 dom_ids = self.list_instance_ids() for dom_id in dom_ids: try: dom = self._lookup_by_id(dom_id) try: vcpus = dom.vcpus() except libvirt.libvirtError as e: LOG.warn(_("couldn't obtain the vpu count from domain id:" " %(id)s, exception: %(ex)s") % {"id": dom_id, "ex": e}) else: if vcpus is not None and len(vcpus) > 1: total += len(vcpus[1]) except exception.InstanceNotFound: LOG.info(_("libvirt can't find a domain with id: %s") % dom_id) continue # NOTE(gtt116): give change to do other task. greenthread.sleep(0) return total def get_memory_mb_used(self): """Get the free memory size(MB) of physical computer. :returns: the total usage of memory(MB). 
""" if sys.platform.upper() not in ['LINUX2', 'LINUX3']: return 0 m = open('/proc/meminfo').read().split() idx1 = m.index('MemFree:') idx2 = m.index('Buffers:') idx3 = m.index('Cached:') if CONF.libvirt.virt_type == 'xen': used = 0 for domain_id in self.list_instance_ids(): try: dom_mem = int(self._lookup_by_id(domain_id).info()[2]) except exception.InstanceNotFound: LOG.info(_("libvirt can't find a domain with id: %s") % domain_id) continue # skip dom0 if domain_id != 0: used += dom_mem else: # the mem reported by dom0 is be greater of what # it is being used used += (dom_mem - (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))) # Convert it to MB return used / units.Ki else: avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) # Convert it to MB return self.get_memory_mb_total() - avail / units.Ki def get_hypervisor_type(self): """Get hypervisor type. :returns: hypervisor type (ex. qemu) """ return self._conn.getType() def get_hypervisor_version(self): """Get hypervisor version. :returns: hypervisor version (ex. 12003) """ # NOTE(justinsb): getVersion moved between libvirt versions # Trying to do be compatible with older versions is a lost cause # But ... we can at least give the user a nice message method = getattr(self._conn, 'getVersion', None) if method is None: raise exception.NovaException(_("libvirt version is too old" " (does not support getVersion)")) # NOTE(justinsb): If we wanted to get the version, we could: # method = getattr(libvirt, 'getVersion', None) # NOTE(justinsb): This would then rely on a proper version check return method() def get_hypervisor_hostname(self): """Returns the hostname of the hypervisor.""" hostname = self._conn.getHostname() if not hasattr(self, '_hypervisor_hostname'): self._hypervisor_hostname = hostname elif hostname != self._hypervisor_hostname: LOG.error(_('Hostname has changed from %(old)s ' 'to %(new)s. A restart is required to take effect.' ) % {'old': self._hypervisor_hostname, 'new': hostname}) return self._hypervisor_hostname def get_instance_capabilities(self): """Get hypervisor instance capabilities Returns a list of tuples that describe instances the hypervisor is capable of hosting. Each tuple consists of the triplet (arch, hypervisor_type, vm_mode). :returns: List of tuples describing instance capabilities """ caps = self.get_host_capabilities() instance_caps = list() for g in caps.guests: for dt in g.domtype: instance_cap = (g.arch, dt, g.ostype) instance_caps.append(instance_cap) return instance_caps def get_cpu_info(self): """Get cpuinfo information. Obtains cpu feature from virConnect.getCapabilities, and returns as a json string. :return: see above description """ caps = self.get_host_capabilities() cpu_info = dict() cpu_info['arch'] = caps.host.cpu.arch cpu_info['model'] = caps.host.cpu.model cpu_info['vendor'] = caps.host.cpu.vendor topology = dict() topology['sockets'] = caps.host.cpu.sockets topology['cores'] = caps.host.cpu.cores topology['threads'] = caps.host.cpu.threads cpu_info['topology'] = topology features = list() for f in caps.host.cpu.features: features.append(f.name) cpu_info['features'] = features # TODO(berrange): why do we bother converting the # libvirt capabilities XML into a special JSON format ? 
# The data format is different across all the drivers # so we could just return the raw capabilities XML # which 'compare_cpu' could use directly # # That said, arch_filter.py now seems to rely on # the libvirt drivers format which suggests this # data format needs to be standardized across drivers return jsonutils.dumps(cpu_info) def _get_pcidev_info(self, devname): """Returns a dict of PCI device.""" def _get_device_type(cfgdev): """Get a PCI device's device type. An assignable PCI device can be a normal PCI device, a SR-IOV Physical Function (PF), or a SR-IOV Virtual Function (VF). Only normal PCI devices or SR-IOV VFs are assignable, while SR-IOV PFs are always owned by hypervisor. Please notice that a PCI device with SR-IOV capability but not enabled is reported as normal PCI device. """ for fun_cap in cfgdev.pci_capability.fun_capability: if len(fun_cap.device_addrs) != 0: if fun_cap.type == 'virt_functions': return {'dev_type': 'type-PF'} if fun_cap.type == 'phys_function': phys_address = "%s:%s:%s.%s" % ( fun_cap.device_addrs[0][0].replace("0x", ''), fun_cap.device_addrs[0][1].replace("0x", ''), fun_cap.device_addrs[0][2].replace("0x", ''), fun_cap.device_addrs[0][3].replace("0x", '')) return {'dev_type': 'type-VF', 'phys_function': phys_address} return {'dev_type': 'type-PCI'} virtdev = self._conn.nodeDeviceLookupByName(devname) xmlstr = virtdev.XMLDesc(0) cfgdev = vconfig.LibvirtConfigNodeDevice() cfgdev.parse_str(xmlstr) address = "%04x:%02x:%02x.%1x" % ( cfgdev.pci_capability.domain, cfgdev.pci_capability.bus, cfgdev.pci_capability.slot, cfgdev.pci_capability.function) device = { "dev_id": cfgdev.name, "address": address, "product_id": cfgdev.pci_capability.product_id[2:6], "vendor_id": cfgdev.pci_capability.vendor_id[2:6], } #requirement by DataBase Model device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device device.update(_get_device_type(cfgdev)) return device def _pci_device_assignable(self, device): if device['dev_type'] == 'type-PF': return False return self.dev_filter.device_assignable(device) def get_pci_passthrough_devices(self): """Get host pci devices information. Obtains pci devices information from libvirt, and returns as a json string. Each device information is a dictionary, with mandatory keys of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id', 'label' and other optional device specific information. Refer to the objects/pci_device.py for more idea of these keys. :returns: a list of the assignable pci devices information """ pci_info = [] dev_names = self._conn.listDevices('pci', 0) or [] for name in dev_names: pci_dev = self._get_pcidev_info(name) if self._pci_device_assignable(pci_dev): pci_info.append(pci_dev) return jsonutils.dumps(pci_info) def get_all_volume_usage(self, context, compute_host_bdms): """Return usage info for volumes attached to vms on a given host. 
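Each entry in the returned list is a dict of the form (illustrative
values): {'volume': volume_id, 'instance': instance, 'rd_req': 112,
'rd_bytes': 4096, 'wr_req': 3, 'wr_bytes': 512,
'flush_operations': 1}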
""" vol_usage = [] for instance_bdms in compute_host_bdms: instance = instance_bdms['instance'] for bdm in instance_bdms['instance_bdms']: vol_stats = [] mountpoint = bdm['device_name'] if mountpoint.startswith('/dev/'): mountpoint = mountpoint[5:] volume_id = bdm['volume_id'] LOG.debug(_("Trying to get stats for the volume %s"), volume_id) vol_stats = self.block_stats(instance['name'], mountpoint) if vol_stats: stats = dict(volume=volume_id, instance=instance, rd_req=vol_stats[0], rd_bytes=vol_stats[1], wr_req=vol_stats[2], wr_bytes=vol_stats[3], flush_operations=vol_stats[4]) LOG.debug( _("Got volume usage stats for the volume=%(volume)s," " rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, " "wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d"), stats, instance=instance) vol_usage.append(stats) return vol_usage def block_stats(self, instance_name, disk): """Note that this function takes an instance name.""" try: domain = self._lookup_by_name(instance_name) return domain.blockStats(disk) except libvirt.libvirtError as e: errcode = e.get_error_code() LOG.info(_('Getting block stats failed, device might have ' 'been detached. Instance=%(instance_name)s ' 'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'), {'instance_name': instance_name, 'disk': disk, 'errcode': errcode, 'e': e}) except exception.InstanceNotFound: LOG.info(_('Could not find domain in libvirt for instance %s. ' 'Cannot get block stats for device'), instance_name) def interface_stats(self, instance_name, interface): """Note that this function takes an instance name.""" domain = self._lookup_by_name(instance_name) return domain.interfaceStats(interface) def get_console_pool_info(self, console_type): #TODO(mdragon): console proxy should be implemented for libvirt, # in case someone wants to use it with kvm or # such. For now return fake data. return {'address': '127.0.0.1', 'username': 'fakeuser', 'password': 'fakepassword'} def refresh_security_group_rules(self, security_group_id): self.firewall_driver.refresh_security_group_rules(security_group_id) def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) def refresh_instance_security_rules(self, instance): self.firewall_driver.refresh_instance_security_rules(instance) def refresh_provider_fw_rules(self): self.firewall_driver.refresh_provider_fw_rules() def get_available_resource(self, nodename): """Retrieve resource information. This method is called when nova-compute launches, and as part of a periodic task that records the results in the DB. :param nodename: will be put in PCI device :returns: dictionary containing resource info """ # Temporary: convert supported_instances into a string, while keeping # the RPC version as JSON. Can be changed when RPC broadcast is removed stats = self.get_host_stats(refresh=True) stats['supported_instances'] = jsonutils.dumps( stats['supported_instances']) return stats def check_instance_shared_storage_local(self, context, instance): """Check if instance files located on shared storage. This runs check on the destination host, and then calls back to the source host to check the results. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :returns :tempfile: A dict containing the tempfile info on the destination host :None: 1. If the instance path is not existing. 2. If the image backend is shared block storage type. 
""" if self.image_backend.backend().is_shared_block_storage(): return None dirpath = libvirt_utils.get_instance_path(instance) if not os.path.exists(dirpath): return None fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug(_("Creating tmpfile %s to verify with other " "compute node that the instance is on " "the same shared storage."), tmp_file, instance=instance) os.close(fd) return {"filename": tmp_file} def check_instance_shared_storage_remote(self, context, data): return os.path.exists(data['filename']) def check_instance_shared_storage_cleanup(self, context, data): fileutils.delete_if_exists(data["filename"]) def check_can_live_migrate_destination(self, context, instance, src_compute_info, dst_compute_info, block_migration=False, disk_over_commit=False): """Check if it is possible to execute live migration. This runs checks on the destination host, and then calls back to the source host to check the results. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param block_migration: if true, prepare for block migration :param disk_over_commit: if true, allow disk over commit :returns: a dict containing: :filename: name of the tmpfile under CONF.instances_path :block_migration: whether this is block migration :disk_over_commit: disk-over-commit factor on dest host :disk_available_mb: available disk space on dest host """ disk_available_mb = None if block_migration: disk_available_gb = dst_compute_info['disk_available_least'] disk_available_mb = \ (disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb # Compare CPU source_cpu_info = src_compute_info['cpu_info'] self._compare_cpu(source_cpu_info) # Create file on storage, to be checked on source host filename = self._create_shared_storage_test_file() return {"filename": filename, "block_migration": block_migration, "disk_over_commit": disk_over_commit, "disk_available_mb": disk_available_mb} def check_can_live_migrate_destination_cleanup(self, context, dest_check_data): """Do required cleanup on dest host after check_can_live_migrate calls :param context: security context """ filename = dest_check_data["filename"] self._cleanup_shared_storage_test_file(filename) def check_can_live_migrate_source(self, context, instance, dest_check_data, block_device_info=None): """Check if it is possible to execute live migration. This checks if the live migration can succeed, based on the results from check_can_live_migrate_destination. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance :param dest_check_data: result of check_can_live_migrate_destination :param block_device_info: result of _get_instance_block_device_info :returns: a dict containing migration info """ # Checking shared storage connectivity # if block migration, instances_paths should not be on shared storage. 
source = CONF.host filename = dest_check_data["filename"] block_migration = dest_check_data["block_migration"] is_volume_backed = dest_check_data.get('is_volume_backed', False) has_local_disks = bool( jsonutils.loads(self.get_instance_disk_info(instance['name']))) shared = self._check_shared_storage_test_file(filename) if block_migration: if shared: reason = _("Block migration can not be used " "with shared storage.") raise exception.InvalidLocalStorage(reason=reason, path=source) self._assert_dest_node_has_enough_disk(context, instance, dest_check_data['disk_available_mb'], dest_check_data['disk_over_commit'], block_device_info) elif not shared and (not is_volume_backed or has_local_disks): reason = _("Live migration can not be used " "without shared storage.") raise exception.InvalidSharedStorage(reason=reason, path=source) dest_check_data.update({"is_shared_storage": shared}) # NOTE(mikal): include the instance directory name here because it # doesn't yet exist on the destination but we want to force that # same name to be used instance_path = libvirt_utils.get_instance_path(instance, relative=True) dest_check_data['instance_relative_path'] = instance_path return dest_check_data def _assert_dest_node_has_enough_disk(self, context, instance, available_mb, disk_over_commit, block_device_info=None): """Checks if destination has enough disk for block migration.""" # Libvirt supports the qcow2 disk format, which is usually compressed # on compute nodes. # The real (compressed) disk image may be enlarged to the "virtual # disk size", which is specified as the maximum disk size. # (See qemu-img info path-to-disk) # The scheduler considers the destination host to still have enough # disk space if real disk size < available disk size # when disk_over_commit is True, # otherwise virtual disk size < available disk size. available = 0 if available_mb: available = available_mb * units.Mi ret = self.get_instance_disk_info(instance['name'], block_device_info=block_device_info) disk_infos = jsonutils.loads(ret) necessary = 0 if disk_over_commit: for info in disk_infos: necessary += int(info['disk_size']) else: for info in disk_infos: necessary += int(info['virt_disk_size']) # Check that available disk > necessary disk if (available - necessary) < 0: reason = (_('Unable to migrate %(instance_uuid)s: ' 'Disk of instance is too large (available' ' on destination host: %(available)s ' '< need: %(necessary)s)') % {'instance_uuid': instance['uuid'], 'available': available, 'necessary': necessary}) raise exception.MigrationPreCheckError(reason=reason) def _compare_cpu(self, cpu_info): """Checks that the host cpu is compatible with a cpu given by xml. "xml" must be a part of libvirt.openAuth(...).getCapabilities(). Return values follow virCPUCompareResult; if the return value is greater than 0, live migration can proceed. 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' :param cpu_info: json string that shows cpu features (see get_cpu_info()) :returns: None. If the given cpu info is not compatible with this server, an exception is raised.
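An illustrative cpu_info value, matching the structure produced by
get_cpu_info(): '{"arch": "x86_64", "model": "Nehalem",
"vendor": "Intel", "topology": {"sockets": 1, "cores": 2,
"threads": 2}, "features": ["rdtscp", "vmx"]}'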
""" # NOTE(berendt): virConnectCompareCPU not working for Xen if CONF.libvirt.virt_type == 'xen': return 1 info = jsonutils.loads(cpu_info) LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info) cpu = vconfig.LibvirtConfigCPU() cpu.arch = info['arch'] cpu.model = info['model'] cpu.vendor = info['vendor'] cpu.sockets = info['topology']['sockets'] cpu.cores = info['topology']['cores'] cpu.threads = info['topology']['threads'] for f in info['features']: cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f)) u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult" m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s") # unknown character exists in xml, then libvirt complains try: ret = self._conn.compareCPU(cpu.to_xml(), 0) except libvirt.libvirtError as e: with excutils.save_and_reraise_exception(): ret = unicode(e) LOG.error(m, {'ret': ret, 'u': u}) if ret <= 0: LOG.error(m, {'ret': ret, 'u': u}) raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u}) def _create_shared_storage_test_file(self): """Makes tmpfile under CONF.instances_path.""" dirpath = CONF.instances_path fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug(_("Creating tmpfile %s to notify to other " "compute nodes that they should mount " "the same storage.") % tmp_file) os.close(fd) return os.path.basename(tmp_file) def _check_shared_storage_test_file(self, filename): """Confirms existence of the tmpfile under CONF.instances_path. Cannot confirm tmpfile return False. """ tmp_file = os.path.join(CONF.instances_path, filename) if not os.path.exists(tmp_file): return False else: return True def _cleanup_shared_storage_test_file(self, filename): """Removes existence of the tmpfile under CONF.instances_path.""" tmp_file = os.path.join(CONF.instances_path, filename) os.remove(tmp_file) def ensure_filtering_rules_for_instance(self, instance, network_info, time_module=None): """Ensure that an instance's filtering rules are enabled. When migrating an instance, we need the filtering rules to be configured on the destination host before starting the migration. Also, when restarting the compute service, we need to ensure that filtering rules exist for all running services. """ if not time_module: time_module = greenthread self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) # nwfilters may be defined in a separate thread in the case # of libvirt non-blocking mode, so we wait for completion timeout_count = range(CONF.live_migration_retry_count) while timeout_count: if self.firewall_driver.instance_filter_exists(instance, network_info): break timeout_count.pop() if len(timeout_count) == 0: msg = _('The firewall filter for %s does not exist') raise exception.NovaException(msg % instance.name) time_module.sleep(1) def filter_defer_apply_on(self): self.firewall_driver.filter_defer_apply_on() def filter_defer_apply_off(self): self.firewall_driver.filter_defer_apply_off() def live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Spawning live_migration operation for distributing high-load. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param post_method: post operation method. expected nova.compute.manager.post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. 
:param block_migration: if true, do block migration. :param migrate_data: implementation specific params """ greenthread.spawn(self._live_migration, context, instance, dest, post_method, recover_method, block_migration, migrate_data) def _live_migration(self, context, instance, dest, post_method, recover_method, block_migration=False, migrate_data=None): """Do live migration. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param dest: destination host :param post_method: post operation method. expected nova.compute.manager.post_live_migration. :param recover_method: recovery method when any exception occurs. expected nova.compute.manager.recover_live_migration. :param block_migration: if true, do block migration. :param migrate_data: implementation specific params """ # Do live migration. try: if block_migration: flaglist = CONF.libvirt.block_migration_flag.split(',') else: flaglist = CONF.libvirt.live_migration_flag.split(',') flagvals = [getattr(libvirt, x.strip()) for x in flaglist] logical_sum = reduce(lambda x, y: x | y, flagvals) dom = self._lookup_by_name(instance["name"]) dom.migrateToURI(CONF.libvirt.live_migration_uri % dest, logical_sum, None, CONF.libvirt.live_migration_bandwidth) except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_("Live Migration failure: %s"), e, instance=instance) recover_method(context, instance, dest, block_migration) # Waiting for completion of live_migration. timer = loopingcall.FixedIntervalLoopingCall(f=None) def wait_for_live_migration(): """Waiting for live migration completion.""" try: self.get_info(instance)['state'] except exception.InstanceNotFound: timer.stop() post_method(context, instance, dest, block_migration, migrate_data) timer.f = wait_for_live_migration timer.start(interval=0.5).wait() def _fetch_instance_kernel_ramdisk(self, context, instance): """Download kernel and ramdisk for instance in instance directory.""" instance_dir = libvirt_utils.get_instance_path(instance) if instance['kernel_id']: libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'kernel'), instance['kernel_id'], instance['user_id'], instance['project_id']) if instance['ramdisk_id']: libvirt_utils.fetch_image(context, os.path.join(instance_dir, 'ramdisk'), instance['ramdisk_id'], instance['user_id'], instance['project_id']) def rollback_live_migration_at_destination(self, context, instance, network_info, block_device_info): """Clean up destination node after a failed live migration.""" self.destroy(context, instance, network_info, block_device_info) def pre_live_migration(self, context, instance, block_device_info, network_info, disk_info, migrate_data=None): """Prepare for live migration.""" # Steps for volume backed instance live migration w/o shared storage. is_shared_storage = True is_volume_backed = False is_block_migration = True instance_relative_path = None if migrate_data: is_shared_storage = migrate_data.get('is_shared_storage', True) is_volume_backed = migrate_data.get('is_volume_backed', False) is_block_migration = migrate_data.get('block_migration', True) instance_relative_path = migrate_data.get('instance_relative_path') if not is_shared_storage: # NOTE(dims): Using config drive with iso format does not work # because of a bug in libvirt with read only devices. However # one can use vfat as config_drive_format which works fine. # Please see bug/1246201 for details on the libvirt bug.
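# e.g. the nova.conf workaround (illustrative snippet):
#   [DEFAULT]
#   config_drive_format = vfat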
if CONF.config_drive_format != 'vfat': if configdrive.required_by(instance): raise exception.NoBlockMigrationForConfigDriveInLibVirt() # NOTE(mikal): this doesn't use libvirt_utils.get_instance_path # because we are ensuring that the same instance directory name # is used as was at the source if instance_relative_path: instance_dir = os.path.join(CONF.instances_path, instance_relative_path) else: instance_dir = libvirt_utils.get_instance_path(instance) if os.path.exists(instance_dir): raise exception.DestinationDiskExists(path=instance_dir) os.mkdir(instance_dir) # Ensure images and backing files are present. self._create_images_and_backing(context, instance, instance_dir, disk_info) if is_volume_backed and not (is_block_migration or is_shared_storage): # Touch the console.log file, required by libvirt. console_file = self._get_console_log_path(instance) libvirt_utils.file_open(console_file, 'a').close() # If the image has a kernel and ramdisk, just download # them the normal way. self._fetch_instance_kernel_ramdisk(context, instance) # Establishing connection to volume server. block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_info = blockinfo.get_info_from_bdm( CONF.libvirt.virt_type, vol) self.volume_driver_method('connect_volume', connection_info, disk_info) # We call plug_vifs before the compute manager calls # ensure_filtering_rules_for_instance, to ensure bridge is set up # Retrying is necessary here because requests arrive continuously # and concurrent requests to iptables make it complain. max_retry = CONF.live_migration_retry_count for cnt in range(max_retry): try: self.plug_vifs(instance, network_info) break except processutils.ProcessExecutionError: if cnt == max_retry - 1: raise else: LOG.warn(_('plug_vifs() failed %(cnt)d. Retry up to ' '%(max_retry)d.'), {'cnt': cnt, 'max_retry': max_retry}, instance=instance) greenthread.sleep(1) def _create_images_and_backing(self, context, instance, instance_dir, disk_info_json): """:param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param instance_dir: instance path to use, calculated externally to handle block migrating an instance with an old style instance path :param disk_info_json: json strings specified in get_instance_disk_info """ if not disk_info_json: disk_info = [] else: disk_info = jsonutils.loads(disk_info_json) for info in disk_info: base = os.path.basename(info['path']) # Get image type and create empty disk image, and # create backing file in case of qcow2. instance_disk = os.path.join(instance_dir, base) if not info['backing_file'] and not os.path.exists(instance_disk): libvirt_utils.create_image(info['type'], instance_disk, info['virt_disk_size']) elif info['backing_file']: # Creating the backing file follows the same path as # spawning an instance.
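# For example (hypothetical file names): a disk backed by
# .../_base/ephemeral_1_default would take the _create_ephemeral
# branch below, one backed by .../_base/swap_512 the _create_swap
# branch, and anything else is re-fetched from glance by image id.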
cache_name = os.path.basename(info['backing_file']) image = self.image_backend.image(instance, instance_disk, CONF.libvirt.images_type) if cache_name.startswith('ephemeral'): image.cache(fetch_func=self._create_ephemeral, fs_label=cache_name, os_type=instance["os_type"], filename=cache_name, size=info['virt_disk_size'], ephemeral_size=instance['ephemeral_gb']) elif cache_name.startswith('swap'): inst_type = flavors.extract_flavor(instance) swap_mb = inst_type['swap'] image.cache(fetch_func=self._create_swap, filename="swap_%s" % swap_mb, size=swap_mb * units.Mi, swap_mb=swap_mb) else: image.cache(fetch_func=libvirt_utils.fetch_image, context=context, filename=cache_name, image_id=instance['image_ref'], user_id=instance['user_id'], project_id=instance['project_id'], size=info['virt_disk_size']) # if image has kernel and ramdisk, just download # following normal way. self._fetch_instance_kernel_ramdisk(context, instance) def post_live_migration(self, context, instance, block_device_info, migrate_data=None): # Disconnect from volume server block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_dev = vol['mount_device'].rpartition("/")[2] self.volume_driver_method('disconnect_volume', connection_info, disk_dev) def post_live_migration_at_destination(self, context, instance, network_info, block_migration, block_device_info=None): """Post operation of live migration at destination host. :param context: security context :param instance: nova.db.sqlalchemy.models.Instance object instance object that is migrated. :param network_info: instance network information :param block_migration: if true, post operation of block_migration. """ # Define migrated instance, otherwise, suspend/destroy does not work. dom_list = self._conn.listDefinedDomains() if instance["name"] not in dom_list: # In case of block migration, destination does not have # libvirt.xml disk_info = blockinfo.get_disk_info( CONF.libvirt.virt_type, instance, block_device_info) xml = self.to_xml(context, instance, network_info, disk_info, block_device_info=block_device_info, write_to_disk=True) self._conn.defineXML(xml) def get_instance_disk_info(self, instance_name, xml=None, block_device_info=None): """Retrieve information about actual disk sizes of an instance. :param instance_name: name of a nova instance as returned by list_instances() :param xml: Optional; Domain XML of given libvirt instance. If omitted, this method attempts to extract it from the pre-existing definition. :param block_device_info: Optional; Can be used to filter out devices which are actually volumes. 
:return: json strings with below format:: "[{'path':'disk', 'type':'raw', 'virt_disk_size':'10737418240', 'backing_file':'backing_file', 'disk_size':'83886080'},...]" """ if xml is None: try: virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) except libvirt.libvirtError as ex: error_code = ex.get_error_code() msg = (_('Error from libvirt while getting description of ' '%(instance_name)s: [Error Code %(error_code)s] ' '%(ex)s') % {'instance_name': instance_name, 'error_code': error_code, 'ex': ex}) LOG.warn(msg) raise exception.InstanceNotFound(instance_id=instance_name) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) volume_devices = set() for vol in block_device_mapping: disk_dev = vol['mount_device'].rpartition("/")[2] volume_devices.add(disk_dev) disk_info = [] doc = etree.fromstring(xml) disk_nodes = doc.findall('.//devices/disk') path_nodes = doc.findall('.//devices/disk/source') driver_nodes = doc.findall('.//devices/disk/driver') target_nodes = doc.findall('.//devices/disk/target') for cnt, path_node in enumerate(path_nodes): disk_type = disk_nodes[cnt].get('type') path = path_node.get('file') target = target_nodes[cnt].attrib['dev'] if not path: LOG.debug(_('skipping disk for %s as it does not have a path'), instance_name) continue if disk_type != 'file': LOG.debug(_('skipping %s since it looks like volume'), path) continue if target in volume_devices: LOG.debug(_('skipping disk %(path)s (%(target)s) as it is a ' 'volume'), {'path': path, 'target': target}) continue # get the real disk size or # raise a localized error if image is unavailable dk_size = int(os.path.getsize(path)) disk_type = driver_nodes[cnt].get('type') if disk_type == "qcow2": backing_file = libvirt_utils.get_disk_backing_file(path) virt_size = disk.get_disk_size(path) over_commit_size = int(virt_size) - dk_size else: backing_file = "" virt_size = dk_size over_commit_size = 0 disk_info.append({'type': disk_type, 'path': path, 'virt_disk_size': virt_size, 'backing_file': backing_file, 'disk_size': dk_size, 'over_committed_disk_size': over_commit_size}) return jsonutils.dumps(disk_info) def get_disk_over_committed_size_total(self): """Return total over committed disk size for all instances.""" # Disk size that all instance uses : virtual_size - disk_size instances_name = self.list_instances() disk_over_committed_size = 0 for i_name in instances_name: try: disk_infos = jsonutils.loads( self.get_instance_disk_info(i_name)) for info in disk_infos: disk_over_committed_size += int( info['over_committed_disk_size']) except OSError as e: if e.errno == errno.ENOENT: LOG.warning(_('Periodic task is updating the host stat, ' 'it is trying to get disk %(i_name)s, ' 'but disk file was removed by concurrent ' 'operations such as resize.'), {'i_name': i_name}) else: raise except exception.InstanceNotFound: # Instance was deleted during the check so ignore it pass # NOTE(gtt116): give change to do other task. greenthread.sleep(0) return disk_over_committed_size def unfilter_instance(self, instance, network_info): """See comments of same method in firewall_driver.""" self.firewall_driver.unfilter_instance(instance, network_info=network_info) def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run update the stats first. """ return self.host_state.get_host_stats(refresh=refresh) def get_host_cpu_stats(self): """Return the current CPU state of the host.""" # Extract node's CPU statistics. 
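# The call below typically yields a dict of nanosecond counters,
# e.g. (illustrative values): {'kernel': 5238000000,
# 'idle': 453151000000, 'user': 2318000000, 'iowait': 6007000000};
# the host CPU frequency (MHz) is then appended under 'frequency'.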
stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0) # getInfo() returns various information about the host node # No. 3 is the expected CPU frequency. stats["frequency"] = self._conn.getInfo()[3] return stats def get_host_uptime(self, host): """Returns the result of calling "uptime".""" #NOTE(dprince): host seems to be ignored for this call and in # other compute drivers as well. Perhaps we should remove it? out, err = utils.execute('env', 'LANG=C', 'uptime') return out def manage_image_cache(self, context, all_instances): """Manage the local cache of images.""" self.image_cache_manager.update(context, all_instances) def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize, shared_storage=False): """Used only for cleanup in case migrate_disk_and_power_off fails.""" try: if os.path.exists(inst_base_resize): utils.execute('rm', '-rf', inst_base) utils.execute('mv', inst_base_resize, inst_base) if not shared_storage: utils.execute('ssh', dest, 'rm', '-rf', inst_base) except Exception: pass def _is_storage_shared_with(self, dest, inst_base): # NOTE (rmk): There are two methods of determining whether we are # on the same filesystem: the source and dest IP are the # same, or we create a file on the dest system via SSH # and check whether the source system can also see it. shared_storage = (dest == self.get_host_ip_addr()) if not shared_storage: tmp_file = uuid.uuid4().hex + '.tmp' tmp_path = os.path.join(inst_base, tmp_file) try: utils.execute('ssh', dest, 'touch', tmp_path) if os.path.exists(tmp_path): shared_storage = True os.unlink(tmp_path) else: utils.execute('ssh', dest, 'rm', tmp_path) except Exception: pass return shared_storage def migrate_disk_and_power_off(self, context, instance, dest, flavor, network_info, block_device_info=None): LOG.debug(_("Starting migrate_disk_and_power_off"), instance=instance) # Checks if the migration needs a disk resize down. for kind in ('root_gb', 'ephemeral_gb'): if flavor[kind] < instance[kind]: reason = _("Unable to resize disk down.") raise exception.InstanceFaultRollback( exception.ResizeError(reason=reason)) disk_info_text = self.get_instance_disk_info(instance['name'], block_device_info=block_device_info) disk_info = jsonutils.loads(disk_info_text) # copy disks to destination # rename instance dir to +_resize at first for using # shared storage for instance dir (eg. NFS). inst_base = libvirt_utils.get_instance_path(instance) inst_base_resize = inst_base + "_resize" shared_storage = self._is_storage_shared_with(dest, inst_base) # try to create the directory on the remote compute node # if this fails we pass the exception up the stack so we can catch # failures here earlier if not shared_storage: utils.execute('ssh', dest, 'mkdir', '-p', inst_base) self.power_off(instance) block_device_mapping = driver.block_device_info_get_mapping( block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] disk_dev = vol['mount_device'].rpartition("/")[2] self.volume_driver_method('disconnect_volume', connection_info, disk_dev) try: utils.execute('mv', inst_base, inst_base_resize) # if we are migrating the instance with shared storage then # create the directory. 
If it is a remote node the directory # has already been created if shared_storage: dest = None utils.execute('mkdir', '-p', inst_base) for info in disk_info: # assume inst_base == dirname(info['path']) img_path = info['path'] fname = os.path.basename(img_path) from_path = os.path.join(inst_base_resize, fname) if info['type'] == 'qcow2' and info['backing_file']: tmp_path = from_path + "_rbase" # merge backing file utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', 'qcow2', from_path, tmp_path) if shared_storage: utils.execute('mv', tmp_path, img_path) else: libvirt_utils.copy_image(tmp_path, img_path, host=dest) utils.execute('rm', '-f', tmp_path) else: # raw or qcow2 with no backing file libvirt_utils.copy_image(from_path, img_path, host=dest) except Exception: with excutils.save_and_reraise_exception(): self._cleanup_remote_migration(dest, inst_base, inst_base_resize, shared_storage) return disk_info_text def _wait_for_running(self, instance): state = self.get_info(instance)['state'] if state == power_state.RUNNING: LOG.info(_("Instance running successfully."), instance=instance) raise loopingcall.LoopingCallDone() def finish_migration(self, context, migration, instance, disk_info, network_info, image_meta, resize_instance, block_device_info=None, power_on=True): LOG.debug(_("Starting finish_migration"), instance=instance) # resize disks. only "disk" and "disk.local" are necessary. disk_info = jsonutils.loads(disk_info) for info in disk_info: fname = os.path.basename(info['path']) if fname == 'disk': size = instance['root_gb'] elif fname == 'disk.local': size = instance['ephemeral_gb'] else: size = 0 size *= units.Gi # If we have a non partitioned image that we can extend # then ensure we're in 'raw' format so we can extend file system. fmt = info['type'] if (size and fmt == 'qcow2' and disk.can_resize_image(info['path'], size) and disk.is_image_partitionless(info['path'], use_cow=True)): path_raw = info['path'] + '_raw' utils.execute('qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', info['path'], path_raw) utils.execute('mv', path_raw, info['path']) fmt = 'raw' if size: use_cow = fmt == 'qcow2' disk.extend(info['path'], size, use_cow=use_cow) if fmt == 'raw' and CONF.use_cow_images: # back to qcow2 (no backing_file though) so that snapshot # will be available path_qcow = info['path'] + '_qcow' utils.execute('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', info['path'], path_qcow) utils.execute('mv', path_qcow, info['path']) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info, image_meta) # assume _create_image do nothing if a target file exists. self._create_image(context, instance, disk_mapping=disk_info['mapping'], network_info=network_info, block_device_info=None, inject_files=False) xml = self.to_xml(context, instance, network_info, disk_info, block_device_info=block_device_info, write_to_disk=True) self._create_domain_and_network(context, xml, instance, network_info, block_device_info, power_on, vifs_already_plugged=True) if power_on: timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_running, instance) timer.start(interval=0.5).wait() def _cleanup_failed_migration(self, inst_base): """Make sure that a failed migrate doesn't prevent us from rolling back in a revert. 
""" try: shutil.rmtree(inst_base) except OSError as e: if e.errno != errno.ENOENT: raise def finish_revert_migration(self, context, instance, network_info, block_device_info=None, power_on=True): LOG.debug(_("Starting finish_revert_migration"), instance=instance) inst_base = libvirt_utils.get_instance_path(instance) inst_base_resize = inst_base + "_resize" # NOTE(danms): if we're recovering from a failed migration, # make sure we don't have a left-over same-host base directory # that would conflict. Also, don't fail on the rename if the # failure happened early. if os.path.exists(inst_base_resize): self._cleanup_failed_migration(inst_base) utils.execute('mv', inst_base_resize, inst_base) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, block_device_info) xml = self.to_xml(context, instance, network_info, disk_info, block_device_info=block_device_info) self._create_domain_and_network(context, xml, instance, network_info, block_device_info, power_on) if power_on: timer = loopingcall.FixedIntervalLoopingCall( self._wait_for_running, instance) timer.start(interval=0.5).wait() def confirm_migration(self, migration, instance, network_info): """Confirms a resize, destroying the source VM.""" self._cleanup_resize(instance, network_info) def get_diagnostics(self, instance): def get_io_devices(xml_doc): """get the list of io devices from the xml document.""" result = {"volumes": [], "ifaces": []} try: doc = etree.fromstring(xml_doc) except Exception: return result blocks = [('./devices/disk', 'volumes'), ('./devices/interface', 'ifaces')] for block, key in blocks: section = doc.findall(block) for node in section: for child in node.getchildren(): if child.tag == 'target' and child.get('dev'): result[key].append(child.get('dev')) return result domain = self._lookup_by_name(instance['name']) output = {} # get cpu time, might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt try: cputime = domain.vcpus()[0] for i in range(len(cputime)): output["cpu" + str(i) + "_time"] = cputime[i][2] except libvirt.libvirtError: pass # get io status xml = domain.XMLDesc(0) dom_io = get_io_devices(xml) for disk in dom_io["volumes"]: try: # blockStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt stats = domain.blockStats(disk) output[disk + "_read_req"] = stats[0] output[disk + "_read"] = stats[1] output[disk + "_write_req"] = stats[2] output[disk + "_write"] = stats[3] output[disk + "_errors"] = stats[4] except libvirt.libvirtError: pass for interface in dom_io["ifaces"]: try: # interfaceStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt stats = domain.interfaceStats(interface) output[interface + "_rx"] = stats[0] output[interface + "_rx_packets"] = stats[1] output[interface + "_rx_errors"] = stats[2] output[interface + "_rx_drop"] = stats[3] output[interface + "_tx"] = stats[4] output[interface + "_tx_packets"] = stats[5] output[interface + "_tx_errors"] = stats[6] output[interface + "_tx_drop"] = stats[7] except libvirt.libvirtError: pass output["memory"] = domain.maxMemory() # memoryStats might launch an exception if the method # is not supported by the underlying hypervisor being # used by libvirt try: mem = domain.memoryStats() for key in mem.keys(): output["memory-" + key] = mem[key] except (libvirt.libvirtError, AttributeError): pass return output def instance_on_disk(self, instance): # ensure directories 
exist and are writable instance_path = libvirt_utils.get_instance_path(instance) LOG.debug(_('Checking instance files accessibility %s'), instance_path) shared_instance_path = os.access(instance_path, os.W_OK) # NOTE(flwang): For shared block storage scenario, the file system is # not really shared by the two hosts, but the volume of evacuated # instance is reachable. shared_block_storage = (self.image_backend.backend(). is_shared_block_storage()) return shared_instance_path or shared_block_storage def inject_network_info(self, instance, nw_info): self.firewall_driver.setup_basic_filtering(instance, nw_info) def _delete_instance_files(self, instance): # NOTE(mikal): a shim to handle this file not using instance objects # everywhere. Remove this when that conversion happens. context = nova_context.get_admin_context(read_deleted='yes') inst_obj = instance_obj.Instance.get_by_uuid(context, instance['uuid']) # NOTE(mikal): this code should be pushed up a layer when this shim is # removed. attempts = int(inst_obj.system_metadata.get('clean_attempts', '0')) success = self.delete_instance_files(inst_obj) inst_obj.system_metadata['clean_attempts'] = str(attempts + 1) if success: inst_obj.cleaned = True inst_obj.save(context) def delete_instance_files(self, instance): target = libvirt_utils.get_instance_path(instance) if os.path.exists(target): LOG.info(_('Deleting instance files %s'), target, instance=instance) try: shutil.rmtree(target) except OSError as e: LOG.error(_('Failed to cleanup directory %(target)s: ' '%(e)s'), {'target': target, 'e': e}, instance=instance) # It is possible that the delete failed, if so don't mark the instance # as cleaned. if os.path.exists(target): LOG.info(_('Deletion of %s failed'), target, instance=instance) return False LOG.info(_('Deletion of %s complete'), target, instance=instance) return True @property def need_legacy_block_device_info(self): return False def default_root_device_name(self, instance, image_meta, root_bdm): disk_bus = blockinfo.get_disk_bus_for_device_type( CONF.libvirt.virt_type, image_meta, "disk") cdrom_bus = blockinfo.get_disk_bus_for_device_type( CONF.libvirt.virt_type, image_meta, "cdrom") root_info = blockinfo.get_root_info( CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus, cdrom_bus) return block_device.prepend_dev(root_info['dev']) def default_device_names_for_instance(self, instance, root_device_name, *block_device_lists): ephemerals, swap, block_device_mapping = block_device_lists[:3] blockinfo.default_device_names(CONF.libvirt.virt_type, nova_context.get_admin_context(), instance, root_device_name, ephemerals, swap, block_device_mapping) class HostState(object): """Manages information about the compute node through libvirt.""" def __init__(self, driver): super(HostState, self).__init__() self._stats = {} self.driver = driver self.update_status() def get_host_stats(self, refresh=False): """Return the current state of the host. If 'refresh' is True, run update the stats first. """ if refresh or not self._stats: self.update_status() return self._stats def update_status(self): """Retrieve status info from libvirt.""" def _get_disk_available_least(): """Return total real disk available least size. The size of available disk, when block_migration command given disk_over_commit param is FALSE. The size that deducted real instance disk size from the total size of the virtual disk of all instances. """ disk_free_gb = disk_info_dict['free'] disk_over_committed = (self.driver. 
get_disk_over_committed_size_total()) # Disk available least size available_least = disk_free_gb * units.Gi - disk_over_committed return (available_least / units.Gi) LOG.debug(_("Updating host stats")) disk_info_dict = self.driver.get_local_gb_info() data = {} #NOTE(dprince): calling capabilities before getVersion works around # an initialization issue with some versions of Libvirt (1.0.5.5). # See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116 # See: https://bugs.launchpad.net/nova/+bug/1215593 data["supported_instances"] = \ self.driver.get_instance_capabilities() data["vcpus"] = self.driver.get_vcpu_total() data["memory_mb"] = self.driver.get_memory_mb_total() data["local_gb"] = disk_info_dict['total'] data["vcpus_used"] = self.driver.get_vcpu_used() data["memory_mb_used"] = self.driver.get_memory_mb_used() data["local_gb_used"] = disk_info_dict['used'] data["hypervisor_type"] = self.driver.get_hypervisor_type() data["hypervisor_version"] = self.driver.get_hypervisor_version() data["hypervisor_hostname"] = self.driver.get_hypervisor_hostname() data["cpu_info"] = self.driver.get_cpu_info() data['disk_available_least'] = _get_disk_available_least() data['pci_passthrough_devices'] = \ self.driver.get_pci_passthrough_devices() self._stats = data return data nova-2014.1.5/nova/virt/libvirt/__init__.py0000664000567000056700000000124512540642532021567 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.virt.libvirt import driver LibvirtDriver = driver.LibvirtDriver nova-2014.1.5/nova/virt/libvirt/imagebackend.py0000664000567000056700000006757612540642544022450 0ustar jenkinsjenkins00000000000000# Copyright 2012 Grid Dynamics # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import contextlib import os import six from oslo.config import cfg from nova import exception from nova.openstack.common import excutils from nova.openstack.common import fileutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import units from nova import utils from nova.virt.disk import api as disk from nova.virt import images from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import utils as libvirt_utils try: import rados import rbd except ImportError: rados = None rbd = None __imagebackend_opts = [ cfg.StrOpt('images_type', default='default', help='VM Images format. 
Acceptable values are: raw, qcow2, lvm,' ' rbd, default. If default is specified,' ' then use_cow_images flag is used instead of this one.', deprecated_group='DEFAULT', deprecated_name='libvirt_images_type'), cfg.StrOpt('images_volume_group', help='LVM Volume Group that is used for VM images, when you' ' specify images_type=lvm.', deprecated_group='DEFAULT', deprecated_name='libvirt_images_volume_group'), cfg.BoolOpt('sparse_logical_volumes', default=False, help='Create sparse logical volumes (with virtualsize)' ' if this flag is set to True.', deprecated_group='DEFAULT', deprecated_name='libvirt_sparse_logical_volumes'), cfg.StrOpt('volume_clear', default='zero', help='Method used to wipe old volumes (valid options are: ' 'none, zero, shred)'), cfg.IntOpt('volume_clear_size', default=0, help='Size in MiB to wipe at start of old volumes. 0 => all'), cfg.StrOpt('images_rbd_pool', default='rbd', help='The RADOS pool in which rbd volumes are stored', deprecated_group='DEFAULT', deprecated_name='libvirt_images_rbd_pool'), cfg.StrOpt('images_rbd_ceph_conf', default='', # default determined by librados help='Path to the ceph configuration file to use', deprecated_group='DEFAULT', deprecated_name='libvirt_images_rbd_ceph_conf'), ] CONF = cfg.CONF CONF.register_opts(__imagebackend_opts, 'libvirt') CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache') CONF.import_opt('preallocate_images', 'nova.virt.driver') LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class Image(object): def __init__(self, source_type, driver_format, is_block_dev=False): """Image initialization. :source_type: block or file :driver_format: raw or qcow2 :is_block_dev: """ self.source_type = source_type self.driver_format = driver_format self.is_block_dev = is_block_dev self.preallocate = False # NOTE(dripton): We store lines of json (path, disk_format) in this # file, for some image types, to prevent attacks based on changing the # disk_format. self.disk_info_path = None # NOTE(mikal): We need a lock directory which is shared along with # instance files, to cover the scenario where multiple compute nodes # are trying to create a base file at the same time self.lock_path = os.path.join(CONF.instances_path, 'locks') @abc.abstractmethod def create_image(self, prepare_template, base, size, *args, **kwargs): """Create image from template. Contains specific behavior for each image type. :prepare_template: function, that creates template. Should accept `target` argument. :base: Template name :size: Size of created image in bytes """ pass def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode, extra_specs, hypervisor_version): """Get `LibvirtConfigGuestDisk` filled for this image. :disk_dev: Disk bus device name :disk_bus: Disk bus type :device_type: Device type for this image. :cache_mode: Caching mode for this image :extra_specs: Instance type extra specs dict. """ info = vconfig.LibvirtConfigGuestDisk() info.source_type = self.source_type info.source_device = device_type info.target_bus = disk_bus info.target_dev = disk_dev info.driver_cache = cache_mode info.driver_format = self.driver_format driver_name = libvirt_utils.pick_disk_driver_name(hypervisor_version, self.is_block_dev) info.driver_name = driver_name info.source_path = self.path tune_items = ['disk_read_bytes_sec', 'disk_read_iops_sec', 'disk_write_bytes_sec', 'disk_write_iops_sec', 'disk_total_bytes_sec', 'disk_total_iops_sec'] # Note(yaguang): Currently, the only tuning available is Block I/O # throttling for qemu. 
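        # NOTE(editor): illustrative sketch, not part of the original
        # module; kept entirely in comments so the file's behavior is
        # unchanged.  It restates the "quota:*" extra-spec matching the
        # code below performs, with a plain dict standing in for the
        # LibvirtConfigGuestDisk object and made-up sample values:
        #
        #     TUNE_ITEMS = ['disk_read_bytes_sec', 'disk_read_iops_sec',
        #                   'disk_write_bytes_sec', 'disk_write_iops_sec',
        #                   'disk_total_bytes_sec', 'disk_total_iops_sec']
        #
        #     def apply_disk_quota_specs(info, extra_specs):
        #         # Copy whitelisted quota:<item> specs onto the config.
        #         for key, value in extra_specs.items():
        #             scope = key.split(':')
        #             if len(scope) > 1 and scope[0] == 'quota':
        #                 if scope[1] in TUNE_ITEMS:
        #                     setattr(info, scope[1], value)
        #
        #     # e.g. {'quota:disk_read_bytes_sec': '10240000'} sets
        #     # info.disk_read_bytes_sec, while keys outside the
        #     # whitelist (say 'quota:cpu_shares') are ignored here.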
if self.source_type in ['file', 'block']: for key, value in extra_specs.iteritems(): scope = key.split(':') if len(scope) > 1 and scope[0] == 'quota': if scope[1] in tune_items: setattr(info, scope[1], value) return info def check_image_exists(self): return os.path.exists(self.path) def cache(self, fetch_func, filename, size=None, *args, **kwargs): """Creates image from template. Ensures that template and image not already exists. Ensures that base directory exists. Synchronizes on template fetching. :fetch_func: Function that creates the base image Should accept `target` argument. :filename: Name of the file in the image directory :size: Size of created image in bytes (optional) """ @utils.synchronized(filename, external=True, lock_path=self.lock_path) def fetch_func_sync(target, *args, **kwargs): fetch_func(target=target, *args, **kwargs) base_dir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if not os.path.exists(base_dir): fileutils.ensure_tree(base_dir) base = os.path.join(base_dir, filename) if not self.check_image_exists() or not os.path.exists(base): self.create_image(fetch_func_sync, base, size, *args, **kwargs) if (size and self.preallocate and self._can_fallocate() and os.access(self.path, os.W_OK)): utils.execute('fallocate', '-n', '-l', size, self.path) def _can_fallocate(self): """Check once per class, whether fallocate(1) is available, and that the instances directory supports fallocate(2). """ can_fallocate = getattr(self.__class__, 'can_fallocate', None) if can_fallocate is None: _out, err = utils.trycmd('fallocate', '-n', '-l', '1', self.path + '.fallocate_test') fileutils.delete_if_exists(self.path + '.fallocate_test') can_fallocate = not err self.__class__.can_fallocate = can_fallocate if not can_fallocate: LOG.error(_('Unable to preallocate_images=%(imgs)s at path: ' '%(path)s'), {'imgs': CONF.preallocate_images, 'path': self.path}) return can_fallocate @staticmethod def verify_base_size(base, size, base_size=0): """Check that the base image is not larger than size. Since images can't be generally shrunk, enforce this constraint taking account of virtual image size. """ # Note(pbrady): The size and min_disk parameters of a glance # image are checked against the instance size before the image # is even downloaded from glance, but currently min_disk is # adjustable and doesn't currently account for virtual disk size, # so we need this extra check here. # NOTE(cfb): Having a flavor that sets the root size to 0 and having # nova effectively ignore that size and use the size of the # image is considered a feature at this time, not a bug. if size is None: return if size and not base_size: base_size = disk.get_disk_size(base) if size < base_size: msg = _('%(base)s virtual size %(base_size)s ' 'larger than flavor root disk size %(size)s') LOG.error(msg % {'base': base, 'base_size': base_size, 'size': size}) raise exception.FlavorDiskTooSmall() def snapshot_extract(self, target, out_format): raise NotImplementedError() def _get_driver_format(self): return self.driver_format def resolve_driver_format(self): """Return the driver format for self.path. First checks self.disk_info_path for an entry. 
If it's not there, calls self._get_driver_format(), and then stores the result in self.disk_info_path See https://bugs.launchpad.net/nova/+bug/1221190 """ def _dict_from_line(line): if not line: return {} try: return jsonutils.loads(line) except (TypeError, ValueError) as e: msg = (_("Could not load line %(line)s, got error " "%(error)s") % {'line': line, 'error': unicode(e)}) raise exception.InvalidDiskInfo(reason=msg) @utils.synchronized(self.disk_info_path, external=False, lock_path=self.lock_path) def write_to_disk_info_file(): # Use os.open to create it without group or world write permission. fd = os.open(self.disk_info_path, os.O_RDWR | os.O_CREAT, 0o644) with os.fdopen(fd, "r+") as disk_info_file: line = disk_info_file.read().rstrip() dct = _dict_from_line(line) if self.path in dct: msg = _("Attempted overwrite of an existing value.") raise exception.InvalidDiskInfo(reason=msg) dct.update({self.path: driver_format}) disk_info_file.seek(0) disk_info_file.truncate() disk_info_file.write('%s\n' % jsonutils.dumps(dct)) # Ensure the file is always owned by the nova user so qemu can't # write it. utils.chown(self.disk_info_path, owner_uid=os.getuid()) try: if (self.disk_info_path is not None and os.path.exists(self.disk_info_path)): with open(self.disk_info_path) as disk_info_file: line = disk_info_file.read().rstrip() dct = _dict_from_line(line) for path, driver_format in dct.iteritems(): if path == self.path: return driver_format driver_format = self._get_driver_format() if self.disk_info_path is not None: fileutils.ensure_tree(os.path.dirname(self.disk_info_path)) write_to_disk_info_file() except OSError as e: raise exception.DiskInfoReadWriteFail(reason=unicode(e)) return driver_format @staticmethod def is_shared_block_storage(): """True if the backend puts images on a shared block storage.""" return False class Raw(Image): def __init__(self, instance=None, disk_name=None, path=None): super(Raw, self).__init__("file", "raw", is_block_dev=False) self.path = (path or os.path.join(libvirt_utils.get_instance_path(instance), disk_name)) self.preallocate = CONF.preallocate_images != 'none' self.disk_info_path = os.path.join(os.path.dirname(self.path), 'disk.info') self.correct_format() def _get_driver_format(self): data = images.qemu_img_info(self.path) return data.file_format or 'raw' def correct_format(self): if os.path.exists(self.path): self.driver_format = self.resolve_driver_format() def create_image(self, prepare_template, base, size, *args, **kwargs): filename = os.path.split(base)[-1] @utils.synchronized(filename, external=True, lock_path=self.lock_path) def copy_raw_image(base, target, size): libvirt_utils.copy_image(base, target) if size: # class Raw is misnamed, format may not be 'raw' in all cases use_cow = self.driver_format == 'qcow2' disk.extend(target, size, use_cow=use_cow) generating = 'image_id' not in kwargs if generating: if not self.check_image_exists(): #Generating image in place prepare_template(target=self.path, *args, **kwargs) else: if not os.path.exists(base): prepare_template(target=base, max_size=size, *args, **kwargs) self.verify_base_size(base, size) if not os.path.exists(self.path): with fileutils.remove_path_on_error(self.path): copy_raw_image(base, self.path, size) self.correct_format() def snapshot_extract(self, target, out_format): images.convert_image(self.path, target, out_format) class Qcow2(Image): def __init__(self, instance=None, disk_name=None, path=None): super(Qcow2, self).__init__("file", "qcow2", is_block_dev=False) self.path = (path or 
os.path.join(libvirt_utils.get_instance_path(instance), disk_name)) self.preallocate = CONF.preallocate_images != 'none' self.disk_info_path = os.path.join(os.path.dirname(self.path), 'disk.info') self.resolve_driver_format() def create_image(self, prepare_template, base, size, *args, **kwargs): filename = os.path.split(base)[-1] @utils.synchronized(filename, external=True, lock_path=self.lock_path) def copy_qcow2_image(base, target, size): # TODO(pbrady): Consider copying the cow image here # with preallocation=metadata set for performance reasons. # This would be keyed on a 'preallocate_images' setting. libvirt_utils.create_cow_image(base, target) if size: disk.extend(target, size, use_cow=True) # Download the unmodified base image unless we already have a copy. if not os.path.exists(base): prepare_template(target=base, max_size=size, *args, **kwargs) else: self.verify_base_size(base, size) legacy_backing_size = None legacy_base = base # Determine whether an existing qcow2 disk uses a legacy backing by # actually looking at the image itself and parsing the output of the # backing file it expects to be using. if os.path.exists(self.path): backing_path = libvirt_utils.get_disk_backing_file(self.path) if backing_path is not None: backing_file = os.path.basename(backing_path) backing_parts = backing_file.rpartition('_') if backing_file != backing_parts[-1] and \ backing_parts[-1].isdigit(): legacy_backing_size = int(backing_parts[-1]) legacy_base += '_%d' % legacy_backing_size legacy_backing_size *= units.Gi # Create the legacy backing file if necessary. if legacy_backing_size: if not os.path.exists(legacy_base): with fileutils.remove_path_on_error(legacy_base): libvirt_utils.copy_image(base, legacy_base) disk.extend(legacy_base, legacy_backing_size, use_cow=True) if not os.path.exists(self.path): with fileutils.remove_path_on_error(self.path): copy_qcow2_image(base, self.path, size) def snapshot_extract(self, target, out_format): libvirt_utils.extract_snapshot(self.path, 'qcow2', target, out_format) class Lvm(Image): @staticmethod def escape(filename): return filename.replace('_', '__') def __init__(self, instance=None, disk_name=None, path=None): super(Lvm, self).__init__("block", "raw", is_block_dev=True) if path: info = libvirt_utils.logical_volume_info(path) self.vg = info['VG'] self.lv = info['LV'] self.path = path else: if not CONF.libvirt.images_volume_group: raise RuntimeError(_('You should specify' ' images_volume_group' ' flag to use LVM images.')) self.vg = CONF.libvirt.images_volume_group self.lv = '%s_%s' % (instance['uuid'], self.escape(disk_name)) self.path = os.path.join('/dev', self.vg, self.lv) # TODO(pbrady): possibly deprecate libvirt.sparse_logical_volumes # for the more general preallocate_images self.sparse = CONF.libvirt.sparse_logical_volumes self.preallocate = not self.sparse def _can_fallocate(self): return False def create_image(self, prepare_template, base, size, *args, **kwargs): filename = os.path.split(base)[-1] @utils.synchronized(filename, external=True, lock_path=self.lock_path) def create_lvm_image(base, size): base_size = disk.get_disk_size(base) self.verify_base_size(base, size, base_size=base_size) resize = size > base_size size = size if resize else base_size libvirt_utils.create_lvm_image(self.vg, self.lv, size, sparse=self.sparse) images.convert_image(base, self.path, 'raw', run_as_root=True) if resize: disk.resize2fs(self.path, run_as_root=True) generated = 'ephemeral_size' in kwargs #Generate images with specified size right on volume if generated 
and size: libvirt_utils.create_lvm_image(self.vg, self.lv, size, sparse=self.sparse) with self.remove_volume_on_error(self.path): prepare_template(target=self.path, *args, **kwargs) else: if not os.path.exists(base): prepare_template(target=base, max_size=size, *args, **kwargs) with self.remove_volume_on_error(self.path): create_lvm_image(base, size) @contextlib.contextmanager def remove_volume_on_error(self, path): try: yield except Exception: with excutils.save_and_reraise_exception(): libvirt_utils.remove_logical_volumes(path) def snapshot_extract(self, target, out_format): images.convert_image(self.path, target, out_format, run_as_root=True) class RBDVolumeProxy(object): """Context manager for dealing with an existing rbd volume. This handles connecting to rados and opening an ioctx automatically, and otherwise acts like a librbd Image object. The underlying librados client and ioctx can be accessed as the attributes 'client' and 'ioctx'. """ def __init__(self, driver, name, pool=None): client, ioctx = driver._connect_to_rados(pool) try: self.volume = driver.rbd.Image(ioctx, str(name), snapshot=None) except driver.rbd.Error: LOG.exception(_("error opening rbd image %s"), name) driver._disconnect_from_rados(client, ioctx) raise self.driver = driver self.client = client self.ioctx = ioctx def __enter__(self): return self def __exit__(self, type_, value, traceback): try: self.volume.close() finally: self.driver._disconnect_from_rados(self.client, self.ioctx) def __getattr__(self, attrib): return getattr(self.volume, attrib) def ascii_str(s): """Convert a string to ascii, or return None if the input is None. This is useful when a parameter is None by default, or a string. LibRBD only accepts ascii, hence the need for conversion. """ if s is None: return s return str(s) class Rbd(Image): def __init__(self, instance=None, disk_name=None, path=None, **kwargs): super(Rbd, self).__init__("block", "rbd", is_block_dev=False) if path: try: self.rbd_name = path.split('/')[1] except IndexError: raise exception.InvalidDevicePath(path=path) else: self.rbd_name = '%s_%s' % (instance['uuid'], disk_name) if not CONF.libvirt.images_rbd_pool: raise RuntimeError(_('You should specify' ' images_rbd_pool' ' flag to use rbd images.')) self.pool = CONF.libvirt.images_rbd_pool self.ceph_conf = ascii_str(CONF.libvirt.images_rbd_ceph_conf) self.rbd_user = ascii_str(CONF.libvirt.rbd_user) self.rbd = kwargs.get('rbd', rbd) self.rados = kwargs.get('rados', rados) self.path = 'rbd:%s/%s' % (self.pool, self.rbd_name) if self.rbd_user: self.path += ':id=' + self.rbd_user if self.ceph_conf: self.path += ':conf=' + self.ceph_conf def _connect_to_rados(self, pool=None): client = self.rados.Rados(rados_id=self.rbd_user, conffile=self.ceph_conf) try: client.connect() pool_to_open = str(pool or self.pool) ioctx = client.open_ioctx(pool_to_open) return client, ioctx except self.rados.Error: # shutdown cannot raise an exception client.shutdown() raise def _disconnect_from_rados(self, client, ioctx): # closing an ioctx cannot raise an exception ioctx.close() client.shutdown() def _supports_layering(self): return hasattr(self.rbd, 'RBD_FEATURE_LAYERING') def _ceph_args(self): args = [] if self.rbd_user: args.extend(['--id', self.rbd_user]) if self.ceph_conf: args.extend(['--conf', self.ceph_conf]) return args def _get_mon_addrs(self): args = ['ceph', 'mon', 'dump', '--format=json'] + self._ceph_args() out, _ = utils.execute(*args) lines = out.split('\n') if lines[0].startswith('dumped monmap epoch'): lines = lines[1:] monmap = 
jsonutils.loads('\n'.join(lines)) addrs = [mon['addr'] for mon in monmap['mons']] hosts = [] ports = [] for addr in addrs: host_port = addr[:addr.rindex('/')] host, port = host_port.rsplit(':', 1) hosts.append(host.strip('[]')) ports.append(port) return hosts, ports def libvirt_info(self, disk_bus, disk_dev, device_type, cache_mode, extra_specs, hypervisor_version): """Get `LibvirtConfigGuestDisk` filled for this image. :disk_dev: Disk bus device name :disk_bus: Disk bus type :device_type: Device type for this image. :cache_mode: Caching mode for this image :extra_specs: Instance type extra specs dict. """ info = vconfig.LibvirtConfigGuestDisk() hosts, ports = self._get_mon_addrs() info.device_type = device_type info.driver_format = 'raw' info.driver_cache = cache_mode info.target_bus = disk_bus info.target_dev = disk_dev info.source_type = 'network' info.source_protocol = 'rbd' info.source_name = '%s/%s' % (self.pool, self.rbd_name) info.source_hosts = hosts info.source_ports = ports auth_enabled = (CONF.libvirt.rbd_user is not None) if CONF.libvirt.rbd_secret_uuid: info.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid auth_enabled = True # Force authentication locally if CONF.libvirt.rbd_user: info.auth_username = CONF.libvirt.rbd_user if auth_enabled: info.auth_secret_type = 'ceph' info.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid return info def _can_fallocate(self): return False def check_image_exists(self): rbd_volumes = libvirt_utils.list_rbd_volumes(self.pool) for vol in rbd_volumes: if vol.startswith(self.rbd_name): return True return False def _resize(self, volume_name, size): with RBDVolumeProxy(self, volume_name) as vol: vol.resize(int(size)) def create_image(self, prepare_template, base, size, *args, **kwargs): if self.rbd is None: raise RuntimeError(_('rbd python libraries not found')) if not os.path.exists(base): prepare_template(target=base, max_size=size, *args, **kwargs) else: self.verify_base_size(base, size) # keep using the command line import instead of librbd since it # detects zeroes to preserve sparseness in the image args = ['--pool', self.pool, base, self.rbd_name] if self._supports_layering(): args += ['--new-format'] args += self._ceph_args() libvirt_utils.import_rbd_image(*args) base_size = disk.get_disk_size(base) if size and size > base_size: self._resize(self.rbd_name, size) def snapshot_extract(self, target, out_format): images.convert_image(self.path, target, out_format) @staticmethod def is_shared_block_storage(): return True class Backend(object): def __init__(self, use_cow): self.BACKEND = { 'raw': Raw, 'qcow2': Qcow2, 'lvm': Lvm, 'rbd': Rbd, 'default': Qcow2 if use_cow else Raw } def backend(self, image_type=None): if not image_type: image_type = CONF.libvirt.images_type image = self.BACKEND.get(image_type) if not image: raise RuntimeError(_('Unknown image_type=%s') % image_type) return image def image(self, instance, disk_name, image_type=None): """Constructs image for selected backend :instance: Instance name. :name: Image name. :image_type: Image type. Optional, is CONF.libvirt.images_type by default. 
""" backend = self.backend(image_type) return backend(instance=instance, disk_name=disk_name) def snapshot(self, disk_path, image_type=None): """Returns snapshot for given image :path: path to image :image_type: type of image """ backend = self.backend(image_type) return backend(path=disk_path) nova-2014.1.5/nova/virt/libvirt/vif.py0000664000567000056700000010070512540642544020620 0ustar jenkinsjenkins00000000000000# Copyright (C) 2011 Midokura KK # Copyright (C) 2011 Nicira, Inc # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """VIF drivers for libvirt.""" import copy from oslo.config import cfg from nova import exception from nova.network import linux_net from nova.network import model as network_model from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import utils from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import designer LOG = logging.getLogger(__name__) libvirt_vif_opts = [ cfg.BoolOpt('use_virtio_for_bridges', default=True, help='Use virtio for bridge interfaces with KVM/QEMU', deprecated_group='DEFAULT', deprecated_name='libvirt_use_virtio_for_bridges'), ] CONF = cfg.CONF CONF.register_opts(libvirt_vif_opts, 'libvirt') CONF.import_opt('virt_type', 'nova.virt.libvirt.driver', group='libvirt') CONF.import_opt('use_ipv6', 'nova.netconf') # Since libvirt 0.9.11, # supports OpenVSwitch natively. 
LIBVIRT_OVS_VPORT_VERSION = 9011 DEV_PREFIX_ETH = 'eth' def is_vif_model_valid_for_virt(virt_type, vif_model): valid_models = { 'qemu': ['virtio', 'ne2k_pci', 'pcnet', 'rtl8139', 'e1000'], 'kvm': ['virtio', 'ne2k_pci', 'pcnet', 'rtl8139', 'e1000'], 'xen': ['netfront', 'ne2k_pci', 'pcnet', 'rtl8139', 'e1000'], 'lxc': [], 'uml': [], } if vif_model is None: return True if virt_type not in valid_models: raise exception.UnsupportedVirtType(virt=virt_type) return vif_model in valid_models[virt_type] class LibvirtBaseVIFDriver(object): def __init__(self, get_connection): self.get_connection = get_connection self.libvirt_version = None def has_libvirt_version(self, want): if self.libvirt_version is None: conn = self.get_connection() self.libvirt_version = conn.getLibVersion() if self.libvirt_version >= want: return True return False def get_vif_devname(self, vif): if 'devname' in vif: return vif['devname'] return ("nic" + vif['id'])[:network_model.NIC_NAME_LEN] def get_vif_devname_with_prefix(self, vif, prefix): devname = self.get_vif_devname(vif) return prefix + devname[3:] def get_config(self, instance, vif, image_meta, inst_type): conf = vconfig.LibvirtConfigGuestInterface() # Default to letting libvirt / the hypervisor choose the model model = None driver = None # If the user has specified a 'vif_model' against the # image then honour that model if image_meta: vif_model = image_meta.get('properties', {}).get('hw_vif_model') if vif_model is not None: model = vif_model # Else if the virt type is KVM/QEMU, use virtio according # to the global config parameter if (model is None and CONF.libvirt.virt_type in ('kvm', 'qemu') and CONF.libvirt.use_virtio_for_bridges): model = "virtio" # Workaround libvirt bug, where it mistakenly # enables vhost mode, even for non-KVM guests if model == "virtio" and CONF.libvirt.virt_type == "qemu": driver = "qemu" if not is_vif_model_valid_for_virt(CONF.libvirt.virt_type, model): raise exception.UnsupportedHardware(model=model, virt=CONF.libvirt.virt_type) designer.set_vif_guest_frontend_config( conf, vif['address'], model, driver) return conf def plug(self, instance, vif): pass def unplug(self, instance, vif): pass class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): """Generic VIF driver for libvirt networking.""" def get_bridge_name(self, vif): return vif['network']['bridge'] def get_ovs_interfaceid(self, vif): return vif.get('ovs_interfaceid') or vif['id'] def get_br_name(self, iface_id): return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN] def get_veth_pair_names(self, iface_id): return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN], ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN]) def get_firewall_required(self, vif): if vif.is_neutron_filtering_enabled(): return False if CONF.firewall_driver != "nova.virt.firewall.NoopFirewallDriver": return True return False def get_config_bridge(self, instance, vif, image_meta, inst_type): """Get VIF configurations for bridge type.""" conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) designer.set_vif_host_backend_bridge_config( conf, self.get_bridge_name(vif), self.get_vif_devname(vif)) mac_id = vif['address'].replace(':', '') name = "nova-instance-" + instance['name'] + "-" + mac_id if self.get_firewall_required(vif): conf.filtername = name designer.set_vif_bandwidth_config(conf, inst_type) return conf def get_config_ovs_ethernet(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) dev = 
self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) return conf def get_config_ovs_bridge(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) designer.set_vif_host_backend_ovs_config( conf, self.get_bridge_name(vif), self.get_ovs_interfaceid(vif), self.get_vif_devname(vif)) designer.set_vif_bandwidth_config(conf, inst_type) return conf def get_config_ovs_hybrid(self, instance, vif, image_meta, inst_type): newvif = copy.deepcopy(vif) newvif['network']['bridge'] = self.get_br_name(vif['id']) return self.get_config_bridge(instance, newvif, image_meta, inst_type) def get_config_ovs(self, instance, vif, image_meta, inst_type): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): return self.get_config_ovs_hybrid(instance, vif, image_meta, inst_type) elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION): return self.get_config_ovs_bridge(instance, vif, image_meta, inst_type) else: return self.get_config_ovs_ethernet(instance, vif, image_meta, inst_type) def get_config_ivs_hybrid(self, instance, vif, image_meta, inst_type): newvif = copy.deepcopy(vif) newvif['network']['bridge'] = self.get_br_name(vif['id']) return self.get_config_bridge(instance, newvif, image_meta, inst_type) def get_config_ivs_ethernet(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) dev = self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) return conf def get_config_ivs(self, instance, vif, image_meta, inst_type): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): return self.get_config_ivs_hybrid(instance, vif, image_meta, inst_type) else: return self.get_config_ivs_ethernet(instance, vif, image_meta, inst_type) def get_config_802qbg(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) params = vif["qbg_params"] designer.set_vif_host_backend_802qbg_config( conf, vif['network'].get_meta('interface'), params['managerid'], params['typeid'], params['typeidversion'], params['instanceid']) designer.set_vif_bandwidth_config(conf, inst_type) return conf def get_config_802qbh(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) params = vif["qbh_params"] designer.set_vif_host_backend_802qbh_config( conf, vif['network'].get_meta('interface'), params['profileid']) designer.set_vif_bandwidth_config(conf, inst_type) return conf def get_config_iovisor(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) dev = self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) designer.set_vif_bandwidth_config(conf, inst_type) return conf def get_config_midonet(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) dev = self.get_vif_devname(vif) designer.set_vif_host_backend_ethernet_config(conf, dev) return conf def get_config_mlnx_direct(self, instance, vif, image_meta, inst_type): conf = super(LibvirtGenericVIFDriver, self).get_config(instance, vif, image_meta, inst_type) devname = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH) designer.set_vif_host_backend_direct_config(conf, devname) 
designer.set_vif_bandwidth_config(conf, inst_type) return conf def get_config(self, instance, vif, image_meta, inst_type): vif_type = vif['type'] LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s ' 'vif=%(vif)s'), {'vif_type': vif_type, 'instance': instance, 'vif': vif}) if vif_type is None: raise exception.NovaException( _("vif_type parameter must be present " "for this vif_driver implementation")) elif vif_type == network_model.VIF_TYPE_BRIDGE: return self.get_config_bridge(instance, vif, image_meta, inst_type) elif vif_type == network_model.VIF_TYPE_OVS: return self.get_config_ovs(instance, vif, image_meta, inst_type) elif vif_type == network_model.VIF_TYPE_802_QBG: return self.get_config_802qbg(instance, vif, image_meta, inst_type) elif vif_type == network_model.VIF_TYPE_802_QBH: return self.get_config_802qbh(instance, vif, image_meta, inst_type) elif vif_type == network_model.VIF_TYPE_IVS: return self.get_config_ivs(instance, vif, image_meta, inst_type) elif vif_type == network_model.VIF_TYPE_IOVISOR: return self.get_config_iovisor(instance, vif, image_meta, inst_type) elif vif_type == network_model.VIF_TYPE_MLNX_DIRECT: return self.get_config_mlnx_direct(instance, vif, image_meta, inst_type) elif vif_type == network_model.VIF_TYPE_MIDONET: return self.get_config_midonet(instance, vif, image_meta, inst_type) else: raise exception.NovaException( _("Unexpected vif_type=%s") % vif_type) def plug_bridge(self, instance, vif): """Ensure that the bridge exists, and add VIF to it.""" super(LibvirtGenericVIFDriver, self).plug(instance, vif) network = vif['network'] if (not network.get_meta('multi_host', False) and network.get_meta('should_create_bridge', False)): if network.get_meta('should_create_vlan', False): iface = CONF.vlan_interface or \ network.get_meta('bridge_interface') LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'), {'vlan': network.get_meta('vlan'), 'bridge': self.get_bridge_name(vif)}, instance=instance) linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge( network.get_meta('vlan'), self.get_bridge_name(vif), iface) else: iface = CONF.flat_interface or \ network.get_meta('bridge_interface') LOG.debug(_("Ensuring bridge %s"), self.get_bridge_name(vif), instance=instance) linux_net.LinuxBridgeInterfaceDriver.ensure_bridge( self.get_bridge_name(vif), iface) def plug_ovs_ethernet(self, instance, vif): super(LibvirtGenericVIFDriver, self).plug(instance, vif) network = vif['network'] iface_id = self.get_ovs_interfaceid(vif) dev = self.get_vif_devname(vif) linux_net.create_tap_dev(dev) linux_net.create_ovs_vif_port(self.get_bridge_name(vif), dev, iface_id, vif['address'], instance['uuid']) def plug_ovs_bridge(self, instance, vif): """No manual plugging required.""" super(LibvirtGenericVIFDriver, self).plug(instance, vif) def plug_ovs_hybrid(self, instance, vif): """Plug using hybrid strategy Create a per-VIF linux bridge, then link that bridge to the OVS integration bridge via a veth device, setting up the other end of the veth device just like a normal OVS port. Then boot the VIF on the linux bridge using standard libvirt mechanisms. 
""" super(LibvirtGenericVIFDriver, self).plug(instance, vif) iface_id = self.get_ovs_interfaceid(vif) br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) if not linux_net.device_exists(br_name): utils.execute('brctl', 'addbr', br_name, run_as_root=True) utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True) utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True) utils.execute('tee', ('/sys/class/net/%s/bridge/multicast_snooping' % br_name), process_input='0', run_as_root=True, check_exit_code=[0, 1]) if not linux_net.device_exists(v2_name): linux_net._create_veth_pair(v1_name, v2_name) utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True) utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True) linux_net.create_ovs_vif_port(self.get_bridge_name(vif), v2_name, iface_id, vif['address'], instance['uuid']) def plug_ovs(self, instance, vif): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): self.plug_ovs_hybrid(instance, vif) elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION): self.plug_ovs_bridge(instance, vif) else: self.plug_ovs_ethernet(instance, vif) def plug_ivs_ethernet(self, instance, vif): super(LibvirtGenericVIFDriver, self).plug(instance, vif) iface_id = self.get_ovs_interfaceid(vif) dev = self.get_vif_devname(vif) linux_net.create_tap_dev(dev) linux_net.create_ivs_vif_port(dev, iface_id, vif['address'], instance['uuid']) def plug_ivs_hybrid(self, instance, vif): """Plug using hybrid strategy (same as OVS) Create a per-VIF linux bridge, then link that bridge to the OVS integration bridge via a veth device, setting up the other end of the veth device just like a normal IVS port. Then boot the VIF on the linux bridge using standard libvirt mechanisms. 
""" super(LibvirtGenericVIFDriver, self).plug(instance, vif) iface_id = self.get_ovs_interfaceid(vif) br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) if not linux_net.device_exists(br_name): utils.execute('brctl', 'addbr', br_name, run_as_root=True) utils.execute('brctl', 'setfd', br_name, 0, run_as_root=True) utils.execute('brctl', 'stp', br_name, 'off', run_as_root=True) utils.execute('tee', ('/sys/class/net/%s/bridge/multicast_snooping' % br_name), process_input='0', run_as_root=True, check_exit_code=[0, 1]) if not linux_net.device_exists(v2_name): linux_net._create_veth_pair(v1_name, v2_name) utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True) utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True) linux_net.create_ivs_vif_port(v2_name, iface_id, vif['address'], instance['uuid']) def plug_ivs(self, instance, vif): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): self.plug_ivs_hybrid(instance, vif) else: self.plug_ivs_ethernet(instance, vif) def plug_mlnx_direct(self, instance, vif): super(LibvirtGenericVIFDriver, self).plug(instance, vif) network = vif['network'] vnic_mac = vif['address'] device_id = instance['uuid'] fabric = network['meta']['physical_network'] dev_name = self.get_vif_devname_with_prefix(vif, DEV_PREFIX_ETH) try: utils.execute('ebrctl', 'add-port', vnic_mac, device_id, fabric, network_model.VIF_TYPE_MLNX_DIRECT, dev_name, run_as_root=True) except processutils.ProcessExecutionError: LOG.exception(_("Failed while plugging vif"), instance=instance) def plug_802qbg(self, instance, vif): super(LibvirtGenericVIFDriver, self).plug(instance, vif) def plug_802qbh(self, instance, vif): super(LibvirtGenericVIFDriver, self).plug(instance, vif) def plug_midonet(self, instance, vif): """Plug into MidoNet's network port Bind the vif to a MidoNet virtual port. """ super(LibvirtGenericVIFDriver, self).plug(instance, vif) dev = self.get_vif_devname(vif) port_id = vif['id'] try: linux_net.create_tap_dev(dev) utils.execute('mm-ctl', '--bind-port', port_id, dev, run_as_root=True) except processutils.ProcessExecutionError: LOG.exception(_("Failed while plugging vif"), instance=instance) def plug_iovisor(self, instance, vif): """Plug using PLUMgrid IO Visor Driver Connect a network device to their respective Virtual Domain in PLUMgrid Platform. 
""" super(LibvirtGenericVIFDriver, self).plug(instance, vif) dev = self.get_vif_devname(vif) iface_id = vif['id'] linux_net.create_tap_dev(dev) net_id = vif['network']['id'] tenant_id = instance["project_id"] try: utils.execute('ifc_ctl', 'gateway', 'add_port', dev, run_as_root=True) utils.execute('ifc_ctl', 'gateway', 'ifup', dev, 'access_vm', vif['network']['label'] + "_" + iface_id, vif['address'], 'pgtag2=%s' % net_id, 'pgtag1=%s' % tenant_id, run_as_root=True) except processutils.ProcessExecutionError: LOG.exception(_("Failed while plugging vif"), instance=instance) def plug(self, instance, vif): vif_type = vif['type'] LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s ' 'vif=%(vif)s'), {'vif_type': vif_type, 'instance': instance, 'vif': vif}) if vif_type is None: raise exception.VirtualInterfacePlugException( _("vif_type parameter must be present " "for this vif_driver implementation")) elif vif_type == network_model.VIF_TYPE_BRIDGE: self.plug_bridge(instance, vif) elif vif_type == network_model.VIF_TYPE_OVS: self.plug_ovs(instance, vif) elif vif_type == network_model.VIF_TYPE_802_QBG: self.plug_802qbg(instance, vif) elif vif_type == network_model.VIF_TYPE_802_QBH: self.plug_802qbh(instance, vif) elif vif_type == network_model.VIF_TYPE_IVS: self.plug_ivs(instance, vif) elif vif_type == network_model.VIF_TYPE_IOVISOR: self.plug_iovisor(instance, vif) elif vif_type == network_model.VIF_TYPE_MLNX_DIRECT: self.plug_mlnx_direct(instance, vif) elif vif_type == network_model.VIF_TYPE_MIDONET: self.plug_midonet(instance, vif) else: raise exception.VirtualInterfacePlugException( _("Unexpected vif_type=%s") % vif_type) def unplug_bridge(self, instance, vif): """No manual unplugging required.""" super(LibvirtGenericVIFDriver, self).unplug(instance, vif) def unplug_ovs_ethernet(self, instance, vif): """Unplug the VIF by deleting the port from the bridge.""" super(LibvirtGenericVIFDriver, self).unplug(instance, vif) try: linux_net.delete_ovs_vif_port(self.get_bridge_name(vif), self.get_vif_devname(vif)) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_ovs_bridge(self, instance, vif): """No manual unplugging required.""" super(LibvirtGenericVIFDriver, self).unplug(instance, vif) def unplug_ovs_hybrid(self, instance, vif): """UnPlug using hybrid strategy Unhook port from OVS, unhook port from bridge, delete bridge, and delete both veth devices. 
""" super(LibvirtGenericVIFDriver, self).unplug(instance, vif) try: br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) if linux_net.device_exists(br_name): utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True) utils.execute('ip', 'link', 'set', br_name, 'down', run_as_root=True) utils.execute('brctl', 'delbr', br_name, run_as_root=True) linux_net.delete_ovs_vif_port(self.get_bridge_name(vif), v2_name) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_ovs(self, instance, vif): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): self.unplug_ovs_hybrid(instance, vif) elif self.has_libvirt_version(LIBVIRT_OVS_VPORT_VERSION): self.unplug_ovs_bridge(instance, vif) else: self.unplug_ovs_ethernet(instance, vif) def unplug_ivs_ethernet(self, instance, vif): """Unplug the VIF by deleting the port from the bridge.""" super(LibvirtGenericVIFDriver, self).unplug(instance, vif) try: linux_net.delete_ivs_vif_port(self.get_vif_devname(vif)) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_ivs_hybrid(self, instance, vif): """UnPlug using hybrid strategy (same as OVS) Unhook port from IVS, unhook port from bridge, delete bridge, and delete both veth devices. """ super(LibvirtGenericVIFDriver, self).unplug(instance, vif) try: br_name = self.get_br_name(vif['id']) v1_name, v2_name = self.get_veth_pair_names(vif['id']) utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True) utils.execute('ip', 'link', 'set', br_name, 'down', run_as_root=True) utils.execute('brctl', 'delbr', br_name, run_as_root=True) linux_net.delete_ivs_vif_port(v2_name) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_ivs(self, instance, vif): if self.get_firewall_required(vif) or vif.is_hybrid_plug_enabled(): self.unplug_ivs_hybrid(instance, vif) else: self.unplug_ivs_ethernet(instance, vif) def unplug_mlnx_direct(self, instance, vif): super(LibvirtGenericVIFDriver, self).unplug(instance, vif) network = vif['network'] vnic_mac = vif['address'] fabric = network['meta']['physical_network'] try: utils.execute('ebrctl', 'del-port', fabric, vnic_mac, run_as_root=True) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_802qbg(self, instance, vif): super(LibvirtGenericVIFDriver, self).unplug(instance, vif) def unplug_802qbh(self, instance, vif): super(LibvirtGenericVIFDriver, self).unplug(instance, vif) def unplug_midonet(self, instance, vif): """Unplug from MidoNet network port Unbind the vif from a MidoNet virtual port. """ super(LibvirtGenericVIFDriver, self).unplug(instance, vif) dev = self.get_vif_devname(vif) port_id = vif['id'] try: utils.execute('mm-ctl', '--unbind-port', port_id, run_as_root=True) linux_net.delete_net_dev(dev) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug_iovisor(self, instance, vif): """Unplug using PLUMgrid IO Visor Driver Delete network device and to their respective connection to the Virtual Domain in PLUMgrid Platform. 
""" super(LibvirtGenericVIFDriver, self).unplug(instance, vif) iface_id = vif['id'] dev = self.get_vif_devname(vif) try: utils.execute('ifc_ctl', 'gateway', 'ifdown', dev, 'access_vm', vif['network']['label'] + "_" + iface_id, vif['address'], run_as_root=True) utils.execute('ifc_ctl', 'gateway', 'del_port', dev, run_as_root=True) linux_net.delete_net_dev(dev) except processutils.ProcessExecutionError: LOG.exception(_("Failed while unplugging vif"), instance=instance) def unplug(self, instance, vif): vif_type = vif['type'] LOG.debug(_('vif_type=%(vif_type)s instance=%(instance)s ' 'vif=%(vif)s'), {'vif_type': vif_type, 'instance': instance, 'vif': vif}) if vif_type is None: raise exception.NovaException( _("vif_type parameter must be present " "for this vif_driver implementation")) elif vif_type == network_model.VIF_TYPE_BRIDGE: self.unplug_bridge(instance, vif) elif vif_type == network_model.VIF_TYPE_OVS: self.unplug_ovs(instance, vif) elif vif_type == network_model.VIF_TYPE_802_QBG: self.unplug_802qbg(instance, vif) elif vif_type == network_model.VIF_TYPE_802_QBH: self.unplug_802qbh(instance, vif) elif vif_type == network_model.VIF_TYPE_IVS: self.unplug_ivs(instance, vif) elif vif_type == network_model.VIF_TYPE_IOVISOR: self.unplug_iovisor(instance, vif) elif vif_type == network_model.VIF_TYPE_MLNX_DIRECT: self.unplug_mlnx_direct(instance, vif) elif vif_type == network_model.VIF_TYPE_MIDONET: self.unplug_midonet(instance, vif) else: raise exception.NovaException( _("Unexpected vif_type=%s") % vif_type) # The following classes were removed in the transition from Havana to # Icehouse, but may still be referenced in configuration files. The # following stubs allow those configurations to work while logging a # deprecation warning. class _LibvirtDeprecatedDriver(LibvirtGenericVIFDriver): def __init__(self, *args, **kwargs): LOG.warn('VIF driver \"%s\" is marked as deprecated and will be ' 'removed in the Juno release.', self.__class__.__name__) super(_LibvirtDeprecatedDriver, self).__init__(*args, **kwargs) class LibvirtBridgeDriver(_LibvirtDeprecatedDriver): pass class LibvirtOpenVswitchDriver(_LibvirtDeprecatedDriver): pass class LibvirtHybridOVSBridgeDriver(_LibvirtDeprecatedDriver): pass class LibvirtOpenVswitchVirtualPortDriver(_LibvirtDeprecatedDriver): pass class NeutronLinuxBridgeVIFDriver(_LibvirtDeprecatedDriver): pass nova-2014.1.5/nova/virt/libvirt/firewall.py0000664000567000056700000003244212540642544021643 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid

from lxml import etree
from oslo.config import cfg

from nova.cloudpipe import pipelib
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
from nova.virt import netutils

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')

libvirt = None


class NWFilterFirewall(base_firewall.FirewallDriver):
    """This class implements a network filtering mechanism by using
    libvirt's nwfilter.

    All instances get a filter ("nova-base") applied. This filter
    provides some basic security such as protection against MAC
    spoofing, IP spoofing, and ARP spoofing.
    """

    def __init__(self, virtapi, get_connection, **kwargs):
        super(NWFilterFirewall, self).__init__(virtapi)
        global libvirt
        if libvirt is None:
            try:
                libvirt = __import__('libvirt')
            except ImportError:
                LOG.warn(_("Libvirt module could not be loaded. "
                           "NWFilterFirewall will not work correctly."))
        self._libvirt_get_connection = get_connection
        self.static_filters_configured = False
        self.handle_security_groups = False

    def apply_instance_filter(self, instance, network_info):
        """No-op. Everything is done in prepare_instance_filter."""
        pass

    def _get_connection(self):
        return self._libvirt_get_connection()
    _conn = property(_get_connection)

    def nova_no_nd_reflection_filter(self):
        """This filter protects against false positives on IPv6 Duplicate
        Address Detection (DAD).
        """
        uuid = self._get_filter_uuid('nova-no-nd-reflection')
        return '''<filter name='nova-no-nd-reflection' chain='ipv6'>
                  <!-- no nd reflection -->
                  <!-- drop if destination mac is v6 mcast mac addr and
                       we sent it. -->
                  <uuid>%s</uuid>
                  <rule action='drop' direction='in'>
                      <mac dstmacaddr='33:33:00:00:00:00'
                           dstmacmask='ff:ff:00:00:00:00' srcmacaddr='$MAC'/>
                  </rule>
                  </filter>''' % uuid

    def nova_dhcp_filter(self):
        """The standard allow-dhcp-server filter is an <ip> one, so it uses
        ebtables to allow traffic through. Without a corresponding rule in
        iptables, it'll get blocked anyway.
        """
        uuid = self._get_filter_uuid('nova-allow-dhcp-server')
        return '''<filter name='nova-allow-dhcp-server' chain='ipv4'>
                    <uuid>%s</uuid>
                    <rule action='accept' direction='out'
                          priority='100'>
                      <udp srcipaddr='0.0.0.0'
                           dstipaddr='255.255.255.255'
                           srcportstart='68'
                           dstportstart='67'/>
                    </rule>
                    <rule action='accept' direction='in'
                          priority='100'>
                      <udp srcipaddr='$DHCPSERVER'
                           srcportstart='67'
                           dstportstart='68'/>
                    </rule>
                  </filter>''' % uuid

    def setup_basic_filtering(self, instance, network_info):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
        LOG.info(_('Called setup_basic_filtering in nwfilter'),
                 instance=instance)

        if self.handle_security_groups:
            # No point in setting up a filter set that we'll be overriding
            # anyway.
            return

        LOG.info(_('Ensuring static filters'), instance=instance)
        self._ensure_static_filters()

        nodhcp_base_filter = self.get_base_filter_list(instance, False)
        dhcp_base_filter = self.get_base_filter_list(instance, True)

        for vif in network_info:
            _base_filter = nodhcp_base_filter
            for subnet in vif['network']['subnets']:
                if subnet.get_meta('dhcp_server'):
                    _base_filter = dhcp_base_filter
                    break
            self._define_filter(self._get_instance_filter_xml(instance,
                                                              _base_filter,
                                                              vif))

    def _get_instance_filter_parameters(self, vif):
        parameters = []

        def format_parameter(parameter, value):
            return ("<parameter name='%s' value='%s'/>" % (parameter, value))

        network = vif['network']
        if not vif['network'] or not vif['network']['subnets']:
            return parameters

        v4_subnets = [s for s in network['subnets'] if s['version'] == 4]
        v6_subnets = [s for s in network['subnets'] if s['version'] == 6]

        for subnet in v4_subnets:
            for ip in subnet['ips']:
                parameters.append(format_parameter('IP', ip['address']))

            dhcp_server = subnet.get_meta('dhcp_server')
            if dhcp_server:
                parameters.append(format_parameter('DHCPSERVER',
                                                   dhcp_server))

        if CONF.use_ipv6:
            for subnet in v6_subnets:
                gateway = subnet.get('gateway')
                if gateway:
                    ra_server = gateway['address'] + "/128"
                    parameters.append(format_parameter('RASERVER',
                                                       ra_server))

        if CONF.allow_same_net_traffic:
            for subnet in v4_subnets:
                ipv4_cidr = subnet['cidr']
                net, mask = netutils.get_net_and_mask(ipv4_cidr)
                parameters.append(format_parameter('PROJNET', net))
                parameters.append(format_parameter('PROJMASK', mask))
            if CONF.use_ipv6:
                for subnet in v6_subnets:
                    ipv6_cidr = subnet['cidr']
                    net, prefix = netutils.get_net_and_prefixlen(ipv6_cidr)
                    parameters.append(format_parameter('PROJNET6', net))
                    parameters.append(format_parameter('PROJMASK6', prefix))

        return parameters

    def _get_instance_filter_xml(self, instance, filters, vif):
        nic_id = vif['address'].replace(':', '')
        instance_filter_name = self._instance_filter_name(instance, nic_id)
        parameters = self._get_instance_filter_parameters(vif)
        uuid = self._get_filter_uuid(instance_filter_name)
        xml = '''<filter name='%s' chain='root'>''' % instance_filter_name
        xml += '<uuid>%s</uuid>' % uuid
        for f in filters:
            xml += '''<filterref filter='%s'>''' % f
            xml += ''.join(parameters)
            xml += '</filterref>'
        xml += '</filter>'
        return xml

    def get_base_filter_list(self, instance, allow_dhcp):
        """Obtain a list of base filters to apply to an instance.

        The return value should be a list of strings, each specifying a
        filter name.  Subclasses can override this function to add
        additional filters as needed.  Additional filters added to the
        list must also be correctly defined within the subclass.
        """
        if pipelib.is_vpn_image(instance['image_ref']):
            base_filter = 'nova-vpn'
        elif allow_dhcp:
            base_filter = 'nova-base'
        else:
            base_filter = 'nova-nodhcp'
        return [base_filter]

    def _ensure_static_filters(self):
        """Static filters are filters that have no need to be IP aware.

        There is no configuration or tuneability of these filters, so
        they can be set up once and forgotten about.
        """
        if self.static_filters_configured:
            return

        filter_set = ['no-mac-spoofing',
                      'no-ip-spoofing',
                      'no-arp-spoofing']

        self._define_filter(self.nova_no_nd_reflection_filter())
        filter_set.append('nova-no-nd-reflection')
        self._define_filter(self._filter_container('nova-nodhcp',
                                                   filter_set))
        filter_set.append('allow-dhcp-server')
        self._define_filter(self._filter_container('nova-base', filter_set))
        self._define_filter(self._filter_container('nova-vpn',
                                                   ['allow-dhcp-server']))
        self._define_filter(self.nova_dhcp_filter())

        self.static_filters_configured = True

    def _filter_container(self, name, filters):
        uuid = self._get_filter_uuid(name)
        xml = '''<filter name='%s' chain='root'>
                 <uuid>%s</uuid>
                 %s
                 </filter>''' % (name, uuid,
                 ''.join(["<filterref filter='%s'/>" % (f,)
                          for f in filters]))
        return xml

    def _get_filter_uuid(self, name):
        try:
            flt = self._conn.nwfilterLookupByName(name)
            xml = flt.XMLDesc(0)
            doc = etree.fromstring(xml)
            u = doc.find("./uuid").text
        except Exception as e:
            LOG.debug("Cannot find UUID for filter '%s': '%s'" % (name, e))
            u = uuid.uuid4().hex

        LOG.debug("UUID for filter '%s' is '%s'" % (name, u))
        return u

    def _define_filter(self, xml):
        if callable(xml):
            xml = xml()
        self._conn.nwfilterDefineXML(xml)

    def unfilter_instance(self, instance, network_info):
        """Clear out the nwfilter rules."""
        instance_name = instance['name']
        for vif in network_info:
            nic_id = vif['address'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance,
                                                              nic_id)
            try:
                _nw = self._conn.nwfilterLookupByName(instance_filter_name)
                _nw.undefine()
            except libvirt.libvirtError as e:
                errcode = e.get_error_code()
                if errcode == libvirt.VIR_ERR_OPERATION_INVALID:
                    # This happens when the instance filter is still in use
                    # (i.e. when the instance has not terminated properly).
                    raise
                LOG.debug(_('The nwfilter(%s) is not found.'),
                          instance_filter_name, instance=instance)

    @staticmethod
    def _instance_filter_name(instance, nic_id=None):
        if not nic_id:
            return 'nova-instance-%s' % (instance['name'])
        return 'nova-instance-%s-%s' % (instance['name'], nic_id)

    def instance_filter_exists(self, instance, network_info):
        """Check whether nova-instance-instance-xxx exists."""
        for vif in network_info:
            nic_id = vif['address'].replace(':', '')
            instance_filter_name = self._instance_filter_name(instance,
                                                              nic_id)
            try:
                self._conn.nwfilterLookupByName(instance_filter_name)
            except libvirt.libvirtError:
                name = instance['name']
                LOG.debug(_('The nwfilter(%(instance_filter_name)s) for '
                            '%(name)s is not found.'),
                          {'instance_filter_name': instance_filter_name,
                           'name': name},
                          instance=instance)
                return False
        return True


class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
    def __init__(self, virtapi, execute=None, **kwargs):
        super(IptablesFirewallDriver, self).__init__(virtapi, **kwargs)
        self.nwfilter = NWFilterFirewall(virtapi, kwargs['get_connection'])

    def setup_basic_filtering(self, instance, network_info):
        """Set up provider rules and basic NWFilter."""
        self.nwfilter.setup_basic_filtering(instance, network_info)
        if not self.basically_filtered:
            LOG.debug(_('iptables firewall: Setup Basic Filtering'),
                      instance=instance)
            self.refresh_provider_fw_rules()
            self.basically_filtered = True

    def apply_instance_filter(self, instance, network_info):
        """No-op.
Everything is done in prepare_instance_filter.""" pass def unfilter_instance(self, instance, network_info): # NOTE(salvatore-orlando): # Overriding base class method for applying nwfilter operation if self.instance_info.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() self.nwfilter.unfilter_instance(instance, network_info) else: LOG.info(_('Attempted to unfilter instance which is not ' 'filtered'), instance=instance) def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists.""" return self.nwfilter.instance_filter_exists(instance, network_info) nova-2014.1.5/nova/virt/libvirt/utils.py0000664000567000056700000005541712540642544021205 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright (c) 2011 Piston Cloud Computing, Inc # Copyright (c) 2011 OpenStack Foundation # (c) Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import errno import os import platform from lxml import etree from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common import units from nova import utils from nova.virt import images from nova.virt import volumeutils libvirt_opts = [ cfg.BoolOpt('snapshot_compression', default=False, help='Compress snapshot images when possible. 
This ' 'currently applies exclusively to qcow2 images', deprecated_group='DEFAULT', deprecated_name='libvirt_snapshot_compression'), ] CONF = cfg.CONF CONF.register_opts(libvirt_opts, 'libvirt') CONF.import_opt('instances_path', 'nova.compute.manager') LOG = logging.getLogger(__name__) def execute(*args, **kwargs): return utils.execute(*args, **kwargs) def get_iscsi_initiator(): return volumeutils.get_iscsi_initiator() def get_fc_hbas(): """Get the Fibre Channel HBA information.""" out = None try: out, err = execute('systool', '-c', 'fc_host', '-v', run_as_root=True) except processutils.ProcessExecutionError as exc: # This handles the case where rootwrap is used # and systool is not installed # 96 = nova.cmd.rootwrap.RC_NOEXECFOUND: if exc.exit_code == 96: LOG.warn(_("systool is not installed")) return [] except OSError as exc: # This handles the case where rootwrap is NOT used # and systool is not installed if exc.errno == errno.ENOENT: LOG.warn(_("systool is not installed")) return [] if out is None: raise RuntimeError(_("Cannot find any Fibre Channel HBAs")) lines = out.split('\n') # ignore the first 2 lines lines = lines[2:] hbas = [] hba = {} lastline = None for line in lines: line = line.strip() # 2 newlines denotes a new hba port if line == '' and lastline == '': if len(hba) > 0: hbas.append(hba) hba = {} else: val = line.split('=') if len(val) == 2: key = val[0].strip().replace(" ", "") value = val[1].strip() hba[key] = value.replace('"', '') lastline = line return hbas def get_fc_hbas_info(): """Get Fibre Channel WWNs and device paths from the system, if any.""" # Note modern linux kernels contain the FC HBA's in /sys # and are obtainable via the systool app hbas = get_fc_hbas() hbas_info = [] for hba in hbas: wwpn = hba['port_name'].replace('0x', '') wwnn = hba['node_name'].replace('0x', '') device_path = hba['ClassDevicepath'] device = hba['ClassDevice'] hbas_info.append({'port_name': wwpn, 'node_name': wwnn, 'host_device': device, 'device_path': device_path}) return hbas_info def get_fc_wwpns(): """Get Fibre Channel WWPNs from the system, if any.""" # Note modern linux kernels contain the FC HBA's in /sys # and are obtainable via the systool app hbas = get_fc_hbas() wwpns = [] if hbas: for hba in hbas: if hba['port_state'] == 'Online': wwpn = hba['port_name'].replace('0x', '') wwpns.append(wwpn) return wwpns def get_fc_wwnns(): """Get Fibre Channel WWNNs from the system, if any.""" # Note modern linux kernels contain the FC HBA's in /sys # and are obtainable via the systool app hbas = get_fc_hbas() wwnns = [] if hbas: for hba in hbas: if hba['port_state'] == 'Online': wwnn = hba['node_name'].replace('0x', '') wwnns.append(wwnn) return wwnns def create_image(disk_format, path, size): """Create a disk image :param disk_format: Disk image format (as known by qemu-img) :param path: Desired location of the disk image :param size: Desired size of disk image. May be given as an int or a string. If given as an int, it will be interpreted as bytes. If it's a string, it should consist of a number with an optional suffix ('K' for Kibibytes, M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes). If no suffix is given, it will be interpreted as bytes. 
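A usage sketch (path and size illustrative): create_image('qcow2', '/var/lib/nova/instances/foo/disk', '10G') simply shells out to `qemu-img create -f qcow2 /var/lib/nova/instances/foo/disk 10G`; the size suffix is interpreted by qemu-img itself, not parsed here.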
""" execute('qemu-img', 'create', '-f', disk_format, path, size) def create_cow_image(backing_file, path, size=None): """Create COW image Creates a COW image with the given backing file :param backing_file: Existing image on which to base the COW image :param path: Desired location of the COW image """ base_cmd = ['qemu-img', 'create', '-f', 'qcow2'] cow_opts = [] if backing_file: cow_opts += ['backing_file=%s' % backing_file] base_details = images.qemu_img_info(backing_file) else: base_details = None # This doesn't seem to get inherited so force it to... # http://paste.ubuntu.com/1213295/ # TODO(harlowja) probably file a bug against qemu-img/qemu if base_details and base_details.cluster_size is not None: cow_opts += ['cluster_size=%s' % base_details.cluster_size] # For now don't inherit this due the following discussion... # See: http://www.gossamer-threads.com/lists/openstack/dev/10592 # if 'preallocation' in base_details: # cow_opts += ['preallocation=%s' % base_details['preallocation']] if base_details and base_details.encryption: cow_opts += ['encryption=%s' % base_details.encryption] if size is not None: cow_opts += ['size=%s' % size] if cow_opts: # Format as a comma separated list csv_opts = ",".join(cow_opts) cow_opts = ['-o', csv_opts] cmd = base_cmd + cow_opts + [path] execute(*cmd) def create_lvm_image(vg, lv, size, sparse=False): """Create LVM image. Creates a LVM image with given size. :param vg: existing volume group which should hold this image :param lv: name for this image (logical volume) :size: size of image in bytes :sparse: create sparse logical volume """ vg_info = get_volume_group_info(vg) free_space = vg_info['free'] def check_size(vg, lv, size): if size > free_space: raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.' ' Only %(free_space)db available,' ' but %(size)db required' ' by volume %(lv)s.') % {'vg': vg, 'free_space': free_space, 'size': size, 'lv': lv}) if sparse: preallocated_space = 64 * units.Mi check_size(vg, lv, preallocated_space) if free_space < size: LOG.warning(_('Volume group %(vg)s will not be able' ' to hold sparse volume %(lv)s.' ' Virtual volume size is %(size)db,' ' but free space on volume group is' ' only %(free_space)db.'), {'vg': vg, 'free_space': free_space, 'size': size, 'lv': lv}) cmd = ('lvcreate', '-L', '%db' % preallocated_space, '--virtualsize', '%db' % size, '-n', lv, vg) else: check_size(vg, lv, size) cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg) execute(*cmd, run_as_root=True, attempts=3) def import_rbd_image(*args): execute('rbd', 'import', *args) def _run_rbd(*args, **kwargs): total = list(args) if CONF.libvirt.rbd_user: total.extend(['--id', str(CONF.libvirt.rbd_user)]) if CONF.libvirt.images_rbd_ceph_conf: total.extend(['--conf', str(CONF.libvirt.images_rbd_ceph_conf)]) return utils.execute(*total, **kwargs) def list_rbd_volumes(pool): """List volumes names for given ceph pool. 
:param pool: ceph pool name """ try: out, err = _run_rbd('rbd', '-p', pool, 'ls') except processutils.ProcessExecutionError: # No problem when no volume in rbd pool return [] return [line.strip() for line in out.splitlines()] def remove_rbd_volumes(pool, *names): """Remove one or more rbd volume.""" for name in names: rbd_remove = ['rbd', '-p', pool, 'rm', name] try: _run_rbd(*rbd_remove, attempts=3, run_as_root=True) except processutils.ProcessExecutionError: LOG.warn(_("rbd remove %(name)s in pool %(pool)s failed"), {'name': name, 'pool': pool}) def get_volume_group_info(vg): """Return free/used/total space info for a volume group in bytes :param vg: volume group name :returns: A dict containing: :total: How big the filesystem is (in bytes) :free: How much space is free (in bytes) :used: How much space is used (in bytes) """ out, err = execute('vgs', '--noheadings', '--nosuffix', '--separator', '|', '--units', 'b', '-o', 'vg_size,vg_free', vg, run_as_root=True) info = out.split('|') if len(info) != 2: raise RuntimeError(_("vg %s must be LVM volume group") % vg) return {'total': int(info[0]), 'free': int(info[1]), 'used': int(info[0]) - int(info[1])} def list_logical_volumes(vg): """List logical volumes paths for given volume group. :param vg: volume group name """ out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg, run_as_root=True) return [line.strip() for line in out.splitlines()] def logical_volume_info(path): """Get logical volume info. :param path: logical volume path """ out, err = execute('lvs', '-o', 'vg_all,lv_all', '--separator', '|', path, run_as_root=True) info = [line.split('|') for line in out.splitlines()] if len(info) != 2: raise RuntimeError(_("Path %s must be LVM logical volume") % path) return dict(zip(*info)) def logical_volume_size(path): """Get logical volume size in bytes. :param path: logical volume path """ out, _err = execute('blockdev', '--getsize64', path, run_as_root=True) return int(out) def _zero_logical_volume(path, volume_size): """Write zeros over the specified path :param path: logical volume path :param size: number of zeros to write """ bs = units.Mi direct_flags = ('oflag=direct',) sync_flags = () remaining_bytes = volume_size # The loop efficiently writes zeros using dd, # and caters for versions of dd that don't have # the easier to use iflag=count_bytes option. while remaining_bytes: zero_blocks = remaining_bytes / bs seek_blocks = (volume_size - remaining_bytes) / bs zero_cmd = ('dd', 'bs=%s' % bs, 'if=/dev/zero', 'of=%s' % path, 'seek=%s' % seek_blocks, 'count=%s' % zero_blocks) zero_cmd += direct_flags zero_cmd += sync_flags if zero_blocks: utils.execute(*zero_cmd, run_as_root=True) remaining_bytes %= bs bs /= units.Ki # Limit to 3 iterations # Use O_DIRECT with initial block size and fdatasync otherwise direct_flags = () sync_flags = ('conv=fdatasync',) def clear_logical_volume(path): """Obfuscate the logical volume. 
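    The strategy is selected by CONF.libvirt.volume_clear ('none', 'zero'
    or 'shred', as handled below); for example, with volume_clear='zero'
    and volume_clear_size=0 the whole device is overwritten by
    _zero_logical_volume() above.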
:param path: logical volume path """ volume_clear = CONF.libvirt.volume_clear if volume_clear not in ('none', 'shred', 'zero'): LOG.error(_("ignoring unrecognized volume_clear='%s' value"), volume_clear) volume_clear = 'zero' if volume_clear == 'none': return volume_clear_size = int(CONF.libvirt.volume_clear_size) * units.Mi volume_size = logical_volume_size(path) if volume_clear_size != 0 and volume_clear_size < volume_size: volume_size = volume_clear_size if volume_clear == 'zero': # NOTE(p-draigbrady): we could use shred to do the zeroing # with -n0 -z, however only versions >= 8.22 perform as well as dd _zero_logical_volume(path, volume_size) elif volume_clear == 'shred': utils.execute('shred', '-n3', '-s%d' % volume_size, path, run_as_root=True) else: raise exception.Invalid(_("volume_clear='%s' is not handled") % volume_clear) def remove_logical_volumes(*paths): """Remove one or more logical volume.""" for path in paths: clear_logical_volume(path) if paths: lvremove = ('lvremove', '-f') + paths execute(*lvremove, attempts=3, run_as_root=True) def pick_disk_driver_name(hypervisor_version, is_block_dev=False): """Pick the libvirt primary backend driver name If the hypervisor supports multiple backend drivers, then the name attribute selects the primary backend driver name, while the optional type attribute provides the sub-type. For example, xen supports a name of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2", while qemu only supports a name of "qemu", but multiple types including "raw", "bochs", "qcow2", and "qed". :param is_block_dev: :returns: driver_name or None """ if CONF.libvirt.virt_type == "xen": if is_block_dev: return "phy" else: # 4000000 == 4.0.0 if hypervisor_version == 4000000: return "tap" else: return "tap2" elif CONF.libvirt.virt_type in ('kvm', 'qemu'): return "qemu" else: # UML doesn't want a driver_name set return None def get_disk_size(path): """Get the (virtual) size of a disk image :param path: Path to the disk image :returns: Size (in bytes) of the given disk image as it would be seen by a virtual machine. """ size = images.qemu_img_info(path).virtual_size return int(size) def get_disk_backing_file(path, basename=True): """Get the backing file of a disk image :param path: Path to the disk image :returns: a path to the image's backing store """ backing_file = images.qemu_img_info(path).backing_file if backing_file and basename: backing_file = os.path.basename(backing_file) return backing_file def copy_image(src, dest, host=None): """Copy a disk image to an existing directory :param src: Source image :param dest: Destination path :param host: Remote host """ if not host: # We shell out to cp because that will intelligently copy # sparse files. I.E. holes will not be written to DEST, # rather recreated efficiently. In addition, since # coreutils 8.11, holes can be read efficiently too. execute('cp', src, dest) else: dest = "%s:%s" % (host, dest) # Try rsync first as that can compress and create sparse dest files. # Note however that rsync currently doesn't read sparse files # efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918 # At least network traffic is mitigated with compression. try: # Do a relatively light weight test first, so that we # can fall back to scp, without having run out of space # on the destination for example. 
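            # Illustratively (host and path made up), the probe below
            # amounts to:
            #   rsync --sparse --compress --dry-run \
            #       /instances/uuid/disk host:/instances/uuid/disk
            # and only if that fails do we settle on scp instead of rsync.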
execute('rsync', '--sparse', '--compress', '--dry-run', src, dest) except processutils.ProcessExecutionError: execute('scp', src, dest) else: execute('rsync', '--sparse', '--compress', src, dest) def write_to_file(path, contents, umask=None): """Write the given contents to a file :param path: Destination file :param contents: Desired contents of the file :param umask: Umask to set when creating this file (will be reset) """ if umask: saved_umask = os.umask(umask) try: with open(path, 'w') as f: f.write(contents) finally: if umask: os.umask(saved_umask) def chown(path, owner): """Change ownership of file or directory :param path: File or directory whose ownership to change :param owner: Desired new owner (given as uid or username) """ execute('chown', owner, path, run_as_root=True) def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt): """Extract a snapshot from a disk image. Note that nobody should write to the disk image during this operation. :param disk_path: Path to disk image :param out_path: Desired path of extracted snapshot """ # NOTE(markmc): ISO is just raw to qemu-img if dest_fmt == 'iso': dest_fmt = 'raw' qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt) # Conditionally enable compression of snapshots. if CONF.libvirt.snapshot_compression and dest_fmt == "qcow2": qemu_img_cmd += ('-c',) qemu_img_cmd += (disk_path, out_path) execute(*qemu_img_cmd) def load_file(path): """Read contents of file :param path: File to read """ with open(path, 'r') as fp: return fp.read() def file_open(*args, **kwargs): """Open file see built-in file() documentation for more details Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return file(*args, **kwargs) def file_delete(path): """Delete (unlink) file Note: The reason this is kept in a separate module is to easily be able to provide a stub module that doesn't alter system state at all (for unit tests) """ return os.unlink(path) def find_disk(virt_dom): """Find root device path for instance May be file or device """ xml_desc = virt_dom.XMLDesc(0) domain = etree.fromstring(xml_desc) if CONF.libvirt.virt_type == 'lxc': source = domain.find('devices/filesystem/source') disk_path = source.get('dir') disk_path = disk_path[0:disk_path.rfind('rootfs')] disk_path = os.path.join(disk_path, 'disk') else: source = domain.find('devices/disk/source') disk_path = source.get('file') or source.get('dev') if not disk_path and CONF.libvirt.images_type == 'rbd': disk_path = source.get('name') if disk_path: disk_path = 'rbd:' + disk_path if not disk_path: raise RuntimeError(_("Can't retrieve root device path " "from instance libvirt configuration")) return disk_path def get_disk_type(path): """Retrieve disk type (raw, qcow2, lvm) for given file.""" if path.startswith('/dev'): return 'lvm' elif path.startswith('rbd:'): return 'rbd' return images.qemu_img_info(path).file_format def get_fs_info(path): """Get free/used/total space info for a filesystem :param path: Any dirent on the filesystem :returns: A dict containing: :free: How much space is free (in bytes) :used: How much space is used (in bytes) :total: How big the filesystem is (in bytes) """ hddinfo = os.statvfs(path) total = hddinfo.f_frsize * hddinfo.f_blocks free = hddinfo.f_frsize * hddinfo.f_bavail used = hddinfo.f_frsize * (hddinfo.f_blocks - hddinfo.f_bfree) return {'total': total, 'free': free, 'used': used} def fetch_image(context, target, image_id, user_id, 
project_id, max_size=0): """Grab image.""" images.fetch_to_raw(context, image_id, target, user_id, project_id, max_size=max_size) def get_instance_path(instance, forceold=False, relative=False): """Determine the correct path for instance storage. This method determines the directory name for instance storage, while handling the fact that we changed the naming style to something more unique in the grizzly release. :param instance: the instance we want a path for :param forceold: force the use of the pre-grizzly format :param relative: if True, just the relative path is returned :returns: a path to store information about that instance """ pre_grizzly_name = os.path.join(CONF.instances_path, instance['name']) if forceold or os.path.exists(pre_grizzly_name): if relative: return instance['name'] return pre_grizzly_name if relative: return instance['uuid'] return os.path.join(CONF.instances_path, instance['uuid']) def get_arch(image_meta): """Determine the architecture of the guest (or host). This method determines the CPU architecture that must be supported by the hypervisor. It gets the (guest) arch info from image_meta properties, and it will fallback to the nova-compute (host) arch if no architecture info is provided in image_meta. :param image_meta: the metadata associated with the instance image :returns: guest (or host) architecture """ if image_meta: arch = image_meta.get('properties', {}).get('architecture') if arch is not None: return arch return platform.processor() def is_mounted(mount_path, source=None): """Check if the given source is mounted at given destination point.""" try: check_cmd = ['findmnt', '--target', mount_path] if source: check_cmd.extend(['--source', source]) utils.execute(*check_cmd) return True except processutils.ProcessExecutionError as exc: return False except OSError as exc: #info since it's not required to have this tool. if exc.errno == errno.ENOENT: LOG.info(_("findmnt tool is not installed")) return False nova-2014.1.5/nova/virt/libvirt/designer.py0000664000567000056700000001022512540642544021631 0ustar jenkinsjenkins00000000000000# Copyright (C) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Policy based configuration of libvirt objects This module provides helper APIs for populating the config.py classes based on common operational needs / policies """ def set_vif_guest_frontend_config(conf, mac, model, driver): """Populate a LibvirtConfigGuestInterface instance with guest frontend details. """ conf.mac_addr = mac if model is not None: conf.model = model if driver is not None: conf.driver_name = driver def set_vif_host_backend_bridge_config(conf, brname, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for a software bridge. """ conf.net_type = "bridge" conf.source_dev = brname if tapname: conf.target_dev = tapname conf.script = "" def set_vif_host_backend_ethernet_config(conf, tapname): """Populate a LibvirtConfigGuestInterface instance with host backend details for an externally configured host device. 
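    A minimal usage sketch (tap name illustrative): calling
    set_vif_host_backend_ethernet_config(conf, 'tap-abc123') sets
    net_type='ethernet', target_dev='tap-abc123' and an empty script on the
    conf object.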
NB use of this configuration is discouraged by libvirt project and will mark domains as 'tainted'. """ conf.net_type = "ethernet" conf.target_dev = tapname conf.script = "" def set_vif_host_backend_ovs_config(conf, brname, interfaceid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an OpenVSwitch bridge. """ conf.net_type = "bridge" conf.source_dev = brname conf.vporttype = "openvswitch" conf.add_vport_param("interfaceid", interfaceid) if tapname: conf.target_dev = tapname conf.script = "" def set_vif_host_backend_802qbg_config(conf, devname, managerid, typeid, typeidversion, instanceid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an 802.1qbg device. """ conf.net_type = "direct" conf.source_dev = devname conf.source_mode = "vepa" conf.vporttype = "802.1Qbg" conf.add_vport_param("managerid", managerid) conf.add_vport_param("typeid", typeid) conf.add_vport_param("typeidversion", typeidversion) conf.add_vport_param("instanceid", instanceid) if tapname: conf.target_dev = tapname def set_vif_host_backend_802qbh_config(conf, devname, profileid, tapname=None): """Populate a LibvirtConfigGuestInterface instance with host backend details for an 802.1qbh device. """ conf.net_type = "direct" conf.source_dev = devname conf.source_mode = "vepa" conf.vporttype = "802.1Qbh" conf.add_vport_param("profileid", profileid) if tapname: conf.target_dev = tapname def set_vif_host_backend_direct_config(conf, devname): """Populate a LibvirtConfigGuestInterface instance with direct Interface. """ conf.net_type = "direct" conf.source_mode = "passthrough" conf.source_dev = devname conf.model = "virtio" def set_vif_bandwidth_config(conf, inst_type): """Config vif inbound/outbound bandwidth limit. parameters are set in instance_type_extra_specs table, key is in the format quota:vif_inbound_average. """ bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak', 'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak', 'vif_outbound_burst'] for key, value in inst_type.get('extra_specs', {}).iteritems(): scope = key.split(':') if len(scope) > 1 and scope[0] == 'quota': if scope[1] in bandwidth_items: setattr(conf, scope[1], value) nova-2014.1.5/nova/virt/baremetal/0000775000567000056700000000000012540643452017737 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/baremetal/db/0000775000567000056700000000000012540643452020324 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/0000775000567000056700000000000012540643452022466 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/0000775000567000056700000000000012540643452025143 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/migrate.cfg0000664000567000056700000000172712540642544027264 0ustar jenkinsjenkins00000000000000[db_settings] # Used to identify which repository this database is versioned under. # You can use the name of your project. repository_id=nova_bm # The name of the database table used to track the schema version. # This name shouldn't already be used by your project. # If this is changed once a database is under version control, you'll need to # change the table name in each database too. 
version_table=migrate_version # When committing a change script, Migrate will attempt to generate the # sql for all supported databases; normally, if one of them fails - probably # because you don't have that database installed - it is ignored and the # commit continues, perhaps ending successfully. # Databases in this list MUST compile successfully during a commit, or the # entire commit will fail. List the databases your application will actually # be using to ensure your updates to that database work properly. # This must be a list; example: ['postgres','sqlite'] required_dbs=[] nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/0000775000567000056700000000000012540643452027013 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/001_init.py0000664000567000056700000001047612540642544030721 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean, Column, DateTime from sqlalchemy import Index, Integer, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine bm_nodes = Table('bm_nodes', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('cpus', Integer), Column('memory_mb', Integer), Column('local_gb', Integer), Column('pm_address', String(length=255)), Column('pm_user', String(length=255)), Column('pm_password', String(length=255)), Column('service_host', String(length=255)), Column('prov_mac_address', String(length=255)), Column('instance_uuid', String(length=36)), Column('registration_status', String(length=16)), Column('task_state', String(length=255)), Column('prov_vlan_id', Integer), Column('terminal_port', Integer), mysql_engine='InnoDB', #mysql_charset='utf8' ) bm_interfaces = Table('bm_interfaces', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('bm_node_id', Integer), Column('address', String(length=255), unique=True), Column('datapath_id', String(length=255)), Column('port_no', Integer), Column('vif_uuid', String(length=36), unique=True), mysql_engine='InnoDB', #mysql_charset='utf8' ) bm_pxe_ips = Table('bm_pxe_ips', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('address', String(length=255), unique=True), Column('bm_node_id', Integer), Column('server_address', String(length=255), unique=True), mysql_engine='InnoDB', #mysql_charset='utf8' ) bm_deployments = Table('bm_deployments', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, 
primary_key=True, nullable=False), Column('bm_node_id', Integer), Column('key', String(length=255)), Column('image_path', String(length=255)), Column('pxe_config_path', String(length=255)), Column('root_mb', Integer), Column('swap_mb', Integer), mysql_engine='InnoDB', #mysql_charset='utf8' ) bm_nodes.create() bm_interfaces.create() bm_pxe_ips.create() bm_deployments.create() Index('idx_bm_nodes_service_host_deleted', bm_nodes.c.service_host, bm_nodes.c.deleted)\ .create(migrate_engine) Index('idx_bm_nodes_instance_uuid_deleted', bm_nodes.c.instance_uuid, bm_nodes.c.deleted)\ .create(migrate_engine) Index('idx_bm_nodes_hmcld', bm_nodes.c.service_host, bm_nodes.c.memory_mb, bm_nodes.c.cpus, bm_nodes.c.local_gb, bm_nodes.c.deleted)\ .create(migrate_engine) Index('idx_bm_interfaces_bm_node_id_deleted', bm_interfaces.c.bm_node_id, bm_interfaces.c.deleted)\ .create(migrate_engine) Index('idx_bm_pxe_ips_bm_node_id_deleted', bm_pxe_ips.c.bm_node_id, bm_pxe_ips.c.deleted)\ .create(migrate_engine) def downgrade(migrate_engine): raise NotImplementedError('Downgrade from 001_init is unsupported.') ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/009_add_ephemeral_mb_to_bm_nodes.pynova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/009_add_ephemeral_mb_to_bm_nod0000664000567000056700000000212312540642544034575 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, Integer, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine t = Table('bm_nodes', meta, autoload=True) ephemeral_mb_col = Column('ephemeral_mb', Integer) t.create_column(ephemeral_mb_col) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine t = Table('bm_nodes', meta, autoload=True) t.drop_column('ephemeral_mb') nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/002_drop_bm_deployments.py0000664000567000056700000000447112540642544034022 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
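# NOTE(editor): net effect of this migration, as a sketch: upgrade() adds
# the deployment-related columns (image_path, pxe_config_path, deploy_key,
# root_mb, swap_mb) plus deploy_key_idx to bm_nodes and then drops
# bm_deployments; downgrade() recreates an empty bm_deployments table, so
# row data is not preserved in either direction.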
from sqlalchemy import Column, Index, MetaData, Table from sqlalchemy import Integer, String, DateTime, Boolean def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine bm_nodes = Table('bm_nodes', meta, autoload=True) image_path = Column('image_path', String(length=255)) pxe_config_path = Column('pxe_config_path', String(length=255)) deploy_key = Column('deploy_key', String(length=255)) root_mb = Column('root_mb', Integer()) swap_mb = Column('swap_mb', Integer()) for c in [image_path, pxe_config_path, deploy_key, root_mb, swap_mb]: bm_nodes.create_column(c) deploy_key_idx = Index('deploy_key_idx', bm_nodes.c.deploy_key) deploy_key_idx.create(migrate_engine) bm_deployments = Table('bm_deployments', meta, autoload=True) bm_deployments.drop() def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine bm_nodes = Table('bm_nodes', meta, autoload=True) for c in ['image_path', 'pxe_config_path', 'deploy_key', 'root_mb', 'swap_mb']: bm_nodes.drop_column(c) bm_deployments = Table('bm_deployments', meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('bm_node_id', Integer), Column('key', String(length=255)), Column('image_path', String(length=255)), Column('pxe_config_path', String(length=255)), Column('root_mb', Integer), Column('swap_mb', Integer), mysql_engine='InnoDB', ) bm_deployments.create() nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/003_add_uuid_to_bm_nodes.py0000664000567000056700000000222612540642544034100 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, String, Table, Index def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine t = Table('bm_nodes', meta, autoload=True) uuid_col = Column('uuid', String(36)) t.create_column(uuid_col) uuid_ux = Index('uuid_ux', t.c.uuid, unique=True) uuid_ux.create(migrate_engine) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine t = Table('bm_nodes', meta, autoload=True) t.drop_column('uuid') nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/007_drop_prov_mac_address.py0000664000567000056700000000226312540642544034316 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 NTT DOCOMO, INC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
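# NOTE(editor): roughly the DDL this pair emits (MySQL dialect assumed,
# for illustration only):
#   upgrade:   ALTER TABLE bm_nodes DROP COLUMN prov_mac_address;
#   downgrade: ALTER TABLE bm_nodes ADD COLUMN prov_mac_address VARCHAR(255);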
from sqlalchemy import Column, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine nodes = Table('bm_nodes', meta, autoload=True) nodes.drop_column('prov_mac_address') def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine nodes = Table('bm_nodes', meta, autoload=True) nodes.create_column(Column('prov_mac_address', String(length=255))) # NOTE(arata): The values held by prov_mac_address are lost in upgrade. # So downgrade has no other choice but to set the column to NULL. ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/005_drop_unused_columns_from_nodes.pynova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/005_drop_unused_columns_from_n0000664000567000056700000000221512540642544034752 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 NTT DOCOMO, INC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, String, Integer, MetaData, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine nodes = Table('bm_nodes', meta, autoload=True) nodes.drop_column('prov_vlan_id') nodes.drop_column('registration_status') def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine nodes = Table('bm_nodes', meta, autoload=True) nodes.create_column(Column('prov_vlan_id', Integer)) nodes.create_column(Column('registration_status', String(length=16))) nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/__init__.py0000664000567000056700000000117312540642544031127 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/010_add_preserve_ephemeral.py0000664000567000056700000000346612540642544034444 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, Boolean, Table, text COLUMN_NAME = 'preserve_ephemeral' TABLE_NAME = 'bm_nodes' def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine t = Table(TABLE_NAME, meta, autoload=True) default = text('0') if migrate_engine.name == 'sqlite' else text('false') preserve_ephemeral_col = Column(COLUMN_NAME, Boolean, server_default=default) t.create_column(preserve_ephemeral_col) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine t = Table(TABLE_NAME, meta, autoload=True) # NOTE(rpodolyaka): SQLite doesn't have native BOOLEAN type, so it's # emulated by adding a CHECK constraint. We must # explicitly omit that constraint here so we don't # receive 'no such column' error when dropping the # column if migrate_engine.name == 'sqlite': t.constraints = set([ c for c in t.constraints if not (hasattr(c, 'sqltext') and COLUMN_NAME in str(c.sqltext)) ]) t.drop_column(COLUMN_NAME) nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/006_move_prov_mac_address.py0000664000567000056700000000541012540642544034314 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 NTT DOCOMO, INC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.openstack.common import log as logging from sqlalchemy import and_, MetaData, select, Table, exists from sqlalchemy import exc LOG = logging.getLogger(__name__) def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine nodes = Table('bm_nodes', meta, autoload=True) ifs = Table('bm_interfaces', meta, autoload=True) q = select([nodes.c.id, nodes.c.prov_mac_address], from_obj=nodes) # Iterate all elements before starting insert since IntegrityError # may disturb the iteration. node_address = {} for node_id, address in q.execute(): node_address[node_id] = address i = ifs.insert() for node_id, address in node_address.iteritems(): try: i.execute({'bm_node_id': node_id, 'address': address}) except exc.IntegrityError: # The address is registered in both bm_nodes and bm_interfaces. # It is expected. pass def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine nodes = Table('bm_nodes', meta, autoload=True) ifs = Table('bm_interfaces', meta, autoload=True) subq = exists().where(and_(ifs.c.bm_node_id == nodes.c.id, ifs.c.address == nodes.c.prov_mac_address)) ifs.delete().where(subq).execute() # NOTE(arata): # In fact, this downgrade may not return the db to the previous state. # It seems to be not so match a problem, so this is just for memo. 
# # Think these two state before upgrading: # # (A) address 'x' is duplicate # bm_nodes.prov_mac_address='x' # bm_interfaces.address=['x', 'y'] # # (B) no address is duplicate # bm_nodes.prov_mac_address='x' # bm_interfaces.address=['y'] # # Upgrading them results in the same state: # # bm_nodes.prov_mac_address='x' # bm_interfaces.address=['x', 'y'] # # Downgrading this results in B, even if the actual initial status was A # Of course we can change it to downgrade to B, but then we cannot # downgrade to A; it is an exclusive choice since we do not have # information about the initial state. nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/008_remove_bm_pxe_ips_table.py0000664000567000056700000000365112540642544034633 0ustar jenkinsjenkins00000000000000# Copyright 2013 Mirantis Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import Index from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import String from sqlalchemy import Table table_name = 'bm_pxe_ips' def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine table = Table(table_name, meta, autoload=True) table.drop() def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine bm_pxe_ips = Table(table_name, meta, Column('created_at', DateTime), Column('updated_at', DateTime), Column('deleted_at', DateTime), Column('deleted', Boolean), Column('id', Integer, primary_key=True, nullable=False), Column('address', String(length=255), unique=True), Column('bm_node_id', Integer), Column('server_address', String(length=255), unique=True), mysql_engine='InnoDB', ) bm_pxe_ips.create() Index( 'idx_bm_pxe_ips_bm_node_id_deleted', bm_pxe_ips.c.bm_node_id, bm_pxe_ips.c.deleted ).create(migrate_engine) ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/004_add_instance_name_to_bm_nodes.pynova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/versions/004_add_instance_name_to_bm_no0000664000567000056700000000210712540642544034612 0ustar jenkinsjenkins00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
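# NOTE(editor): the sqlalchemy-migrate pattern used throughout these
# versions, as a sketch: Table('bm_nodes', meta, autoload=True) reflects
# the live schema from the bound engine, and create_column()/drop_column()
# then issue the corresponding ALTER TABLE statements.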
from sqlalchemy import Column, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine t = Table('bm_nodes', meta, autoload=True) name_col = Column('instance_name', String(255)) t.create_column(name_col) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine t = Table('bm_nodes', meta, autoload=True) t.drop_column('instance_name') nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migrate_repo/__init__.py0000664000567000056700000000117312540642544027257 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/models.py0000664000567000056700000000435212540642544024330 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for baremetal data. """ from sqlalchemy import Column, Boolean, Integer, String from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import ForeignKey, Text from nova.db.sqlalchemy import models BASE = declarative_base() class BareMetalNode(BASE, models.NovaBase): """Represents a bare metal node.""" __tablename__ = 'bm_nodes' id = Column(Integer, primary_key=True) deleted = Column(Boolean, default=False) uuid = Column(String(36)) service_host = Column(String(255)) instance_uuid = Column(String(36)) instance_name = Column(String(255)) cpus = Column(Integer) memory_mb = Column(Integer) local_gb = Column(Integer) preserve_ephemeral = Column(Boolean) pm_address = Column(Text) pm_user = Column(Text) pm_password = Column(Text) task_state = Column(String(255)) terminal_port = Column(Integer) image_path = Column(String(255)) pxe_config_path = Column(String(255)) deploy_key = Column(String(255)) # root_mb, swap_mb and ephemeral_mb are cached flavor values for the # current deployment not attributes of the node. 
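    # (Editor's illustration, assuming the usual flavor-to-MB conversion:
    # a deployment from a flavor with root_gb=10 and swap=1024 MB would be
    # cached here as root_mb=10240, swap_mb=1024.)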
root_mb = Column(Integer) swap_mb = Column(Integer) ephemeral_mb = Column(Integer) class BareMetalInterface(BASE, models.NovaBase): __tablename__ = 'bm_interfaces' id = Column(Integer, primary_key=True) deleted = Column(Boolean, default=False) bm_node_id = Column(Integer, ForeignKey('bm_nodes.id')) address = Column(String(255), unique=True) datapath_id = Column(String(255)) port_no = Column(Integer) vif_uuid = Column(String(36), unique=True) nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/session.py0000664000567000056700000000402712540642544024527 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Session Handling for SQLAlchemy backend.""" from oslo.config import cfg from nova.openstack.common.db.sqlalchemy import session as db_session from nova import paths opts = [ cfg.StrOpt('sql_connection', default=('sqlite:///' + paths.state_path_def('baremetal_nova.sqlite')), help='The SQLAlchemy connection string used to connect to the ' 'bare-metal database'), ] baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(opts, baremetal_group) _FACADE = None def _create_facade_lazily(): global _FACADE if _FACADE is None: _FACADE = db_session.EngineFacade(CONF.baremetal.sql_connection, **dict(CONF.database.iteritems())) return _FACADE def get_session(autocommit=True, expire_on_commit=False): """Return a SQLAlchemy session.""" facade = _create_facade_lazily() return facade.get_session(autocommit=autocommit, expire_on_commit=expire_on_commit) def get_engine(): """Return a SQLAlchemy engine.""" facade = _create_facade_lazily() return facade.get_engine() nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/__init__.py0000664000567000056700000000117312540642544024602 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/api.py0000664000567000056700000002653312540642544023623 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import uuid from sqlalchemy.sql.expression import asc from sqlalchemy.sql.expression import literal_column import nova.context from nova.db.sqlalchemy import api as sqlalchemy_api from nova import exception from nova.openstack.common.db import exception as db_exc from nova.openstack.common.gettextutils import _ from nova.openstack.common import timeutils from nova.openstack.common import uuidutils from nova.virt.baremetal.db.sqlalchemy import models from nova.virt.baremetal.db.sqlalchemy import session as db_session def model_query(context, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. """ session = kwargs.get('session') or db_session.get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = session.query(*args) if read_deleted == 'no': query = query.filter_by(deleted=False) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter_by(deleted=True) else: raise Exception( _("Unrecognized read_deleted value '%s'") % read_deleted) if project_only and nova.context.is_user_context(context): query = query.filter_by(project_id=context.project_id) return query def _save(ref, session=None): if not session: session = db_session.get_session() # We must not call ref.save() with session=None, otherwise NovaBase # uses nova-db's session, which cannot access bm-db. 
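    # NOTE(editor): a typical call, as a sketch:
    #   _save(models.BareMetalNode(uuid=..., service_host='host1'))
    # lets this helper open the bm-db session itself via get_session().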
ref.save(session=session) def _build_node_order_by(query): query = query.order_by(asc(models.BareMetalNode.memory_mb)) query = query.order_by(asc(models.BareMetalNode.cpus)) query = query.order_by(asc(models.BareMetalNode.local_gb)) return query @sqlalchemy_api.require_admin_context def bm_node_get_all(context, service_host=None): query = model_query(context, models.BareMetalNode, read_deleted="no") if service_host: query = query.filter_by(service_host=service_host) return query.all() @sqlalchemy_api.require_admin_context def bm_node_get_associated(context, service_host=None): query = model_query(context, models.BareMetalNode, read_deleted="no").\ filter(models.BareMetalNode.instance_uuid != None) if service_host: query = query.filter_by(service_host=service_host) return query.all() @sqlalchemy_api.require_admin_context def bm_node_get_unassociated(context, service_host=None): query = model_query(context, models.BareMetalNode, read_deleted="no").\ filter(models.BareMetalNode.instance_uuid == None) if service_host: query = query.filter_by(service_host=service_host) return query.all() @sqlalchemy_api.require_admin_context def bm_node_find_free(context, service_host=None, cpus=None, memory_mb=None, local_gb=None): query = model_query(context, models.BareMetalNode, read_deleted="no") query = query.filter(models.BareMetalNode.instance_uuid == None) if service_host: query = query.filter_by(service_host=service_host) if cpus is not None: query = query.filter(models.BareMetalNode.cpus >= cpus) if memory_mb is not None: query = query.filter(models.BareMetalNode.memory_mb >= memory_mb) if local_gb is not None: query = query.filter(models.BareMetalNode.local_gb >= local_gb) query = _build_node_order_by(query) return query.first() @sqlalchemy_api.require_admin_context def bm_node_get(context, bm_node_id): # bm_node_id may be passed as a string. Convert to INT to improve DB perf. bm_node_id = int(bm_node_id) result = model_query(context, models.BareMetalNode, read_deleted="no").\ filter_by(id=bm_node_id).\ first() if not result: raise exception.NodeNotFound(node_id=bm_node_id) return result @sqlalchemy_api.require_admin_context def bm_node_get_by_instance_uuid(context, instance_uuid): if not uuidutils.is_uuid_like(instance_uuid): raise exception.InstanceNotFound(instance_id=instance_uuid) result = model_query(context, models.BareMetalNode, read_deleted="no").\ filter_by(instance_uuid=instance_uuid).\ first() if not result: raise exception.InstanceNotFound(instance_id=instance_uuid) return result @sqlalchemy_api.require_admin_context def bm_node_get_by_node_uuid(context, bm_node_uuid): result = model_query(context, models.BareMetalNode, read_deleted="no").\ filter_by(uuid=bm_node_uuid).\ first() if not result: raise exception.NodeNotFoundByUUID(node_uuid=bm_node_uuid) return result @sqlalchemy_api.require_admin_context def bm_node_create(context, values): if not values.get('uuid'): values['uuid'] = str(uuid.uuid4()) bm_node_ref = models.BareMetalNode() bm_node_ref.update(values) _save(bm_node_ref) return bm_node_ref @sqlalchemy_api.require_admin_context def bm_node_update(context, bm_node_id, values): rows = model_query(context, models.BareMetalNode, read_deleted="no").\ filter_by(id=bm_node_id).\ update(values) if not rows: raise exception.NodeNotFound(node_id=bm_node_id) @sqlalchemy_api.require_admin_context def bm_node_associate_and_update(context, node_uuid, values): """Associate an instance to a node safely. Associate an instance to a node only if that node is not yet associated.
Allow the caller to set any other fields they require in the same operation. For example, this is used to set the node's task_state to BUILDING at the beginning of driver.spawn(). """ if 'instance_uuid' not in values: raise exception.NovaException(_( "instance_uuid must be supplied to bm_node_associate_and_update")) session = db_session.get_session() with session.begin(): query = model_query(context, models.BareMetalNode, session=session, read_deleted="no").\ filter_by(uuid=node_uuid) count = query.filter_by(instance_uuid=None).\ update(values, synchronize_session=False) if count != 1: raise exception.NovaException(_( "Failed to associate instance %(i_uuid)s to baremetal node " "%(n_uuid)s.") % {'i_uuid': values['instance_uuid'], 'n_uuid': node_uuid}) ref = query.first() return ref @sqlalchemy_api.require_admin_context def bm_node_destroy(context, bm_node_id): # First, delete all interfaces belonging to the node. # Delete physically since these have unique columns. session = db_session.get_session() with session.begin(): model_query(context, models.BareMetalInterface, read_deleted="no").\ filter_by(bm_node_id=bm_node_id).\ delete() rows = model_query(context, models.BareMetalNode, read_deleted="no").\ filter_by(id=bm_node_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) if not rows: raise exception.NodeNotFound(node_id=bm_node_id) @sqlalchemy_api.require_admin_context def bm_interface_get(context, if_id): result = model_query(context, models.BareMetalInterface, read_deleted="no").\ filter_by(id=if_id).\ first() if not result: raise exception.NovaException(_("Baremetal interface %s " "not found") % if_id) return result @sqlalchemy_api.require_admin_context def bm_interface_get_all(context): query = model_query(context, models.BareMetalInterface, read_deleted="no") return query.all() @sqlalchemy_api.require_admin_context def bm_interface_destroy(context, if_id): # Delete physically since it has unique columns model_query(context, models.BareMetalInterface, read_deleted="no").\ filter_by(id=if_id).\ delete() @sqlalchemy_api.require_admin_context def bm_interface_create(context, bm_node_id, address, datapath_id, port_no): ref = models.BareMetalInterface() ref.bm_node_id = bm_node_id ref.address = address ref.datapath_id = datapath_id ref.port_no = port_no _save(ref) return ref.id @sqlalchemy_api.require_admin_context def bm_interface_set_vif_uuid(context, if_id, vif_uuid): session = db_session.get_session() with session.begin(): bm_interface = model_query(context, models.BareMetalInterface, read_deleted="no", session=session).\ filter_by(id=if_id).\ with_lockmode('update').\ first() if not bm_interface: raise exception.NovaException(_("Baremetal interface %s " "not found") % if_id) bm_interface.vif_uuid = vif_uuid try: session.add(bm_interface) session.flush() except db_exc.DBError as e: # TODO(deva): clean up when db layer raises DuplicateKeyError if str(e).find('IntegrityError') != -1: raise exception.NovaException(_("Baremetal interface %s " "already in use") % vif_uuid) raise @sqlalchemy_api.require_admin_context def bm_interface_get_by_vif_uuid(context, vif_uuid): result = model_query(context, models.BareMetalInterface, read_deleted="no").\ filter_by(vif_uuid=vif_uuid).\ first() if not result: raise exception.NovaException(_("Baremetal virtual interface %s " "not found") % vif_uuid) return result @sqlalchemy_api.require_admin_context def bm_interface_get_all_by_bm_node_id(context, bm_node_id): result = model_query(context, 
models.BareMetalInterface, read_deleted="no").\ filter_by(bm_node_id=bm_node_id).\ all() if not result: raise exception.NodeNotFound(node_id=bm_node_id) return result nova-2014.1.5/nova/virt/baremetal/db/sqlalchemy/migration.py0000664000567000056700000000560512540642544025040 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from migrate import exceptions as versioning_exceptions from migrate.versioning import api as versioning_api from migrate.versioning.repository import Repository import sqlalchemy from nova import exception from nova.openstack.common.gettextutils import _ from nova.virt.baremetal.db.sqlalchemy import session INIT_VERSION = 0 _REPOSITORY = None def db_sync(version=None): if version is not None: try: version = int(version) except ValueError: raise exception.NovaException(_("version should be an integer")) current_version = db_version() repository = _find_migrate_repo() if version is None or version > current_version: return versioning_api.upgrade(session.get_engine(), repository, version) else: return versioning_api.downgrade(session.get_engine(), repository, version) def db_version(): repository = _find_migrate_repo() try: return versioning_api.db_version(session.get_engine(), repository) except versioning_exceptions.DatabaseNotControlledError: meta = sqlalchemy.MetaData() engine = session.get_engine() meta.reflect(bind=engine) tables = meta.tables if len(tables) == 0: db_version_control(INIT_VERSION) return versioning_api.db_version(session.get_engine(), repository) else: # Some pre-Essex DB's may not be version controlled. # Require them to upgrade using Essex first. raise exception.NovaException( _("Upgrade DB using Essex release first.")) def db_initial_version(): return INIT_VERSION def db_version_control(version=None): repository = _find_migrate_repo() versioning_api.version_control(session.get_engine(), repository, version) return version def _find_migrate_repo(): """Get the path for the migrate repository.""" global _REPOSITORY path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'migrate_repo') assert os.path.exists(path) if _REPOSITORY is None: _REPOSITORY = Repository(path) return _REPOSITORY nova-2014.1.5/nova/virt/baremetal/db/__init__.py0000664000567000056700000000125512540642544022441 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from nova.virt.baremetal.db.api import * # noqa nova-2014.1.5/nova/virt/baremetal/db/api.py0000664000567000056700000001157112540642544021455 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. Functions in this module are imported into the nova.virt.baremetal.db namespace. Call these functions from nova.virt.baremetal.db namespace, not the nova.virt.baremetal.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. Currently, many of these objects are sqlalchemy objects that implement a dictionary interface. However, a future goal is to have all of these objects be simple dictionaries. **Related Flags** :baremetal_db_backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :[BAREMETAL] sql_connection: string specifying the sqlalchemy connection to use, like: `sqlite:///var/lib/nova/nova.sqlite`. """ from oslo.config import cfg from nova import utils # NOTE(deva): we can't move baremetal_db_backend into an OptGroup yet # because utils.LazyPluggable doesn't support reading from # option groups. See bug #1093043. 
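# As an illustration (not part of the original module), the LazyPluggable
# indirection below means callers import this module rather than a
# concrete backend; with the default db_backend the call is forwarded to
# nova.virt.baremetal.db.sqlalchemy.api:
#
#     from nova.virt.baremetal import db
#     node = db.bm_node_get(context, 1)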
db_opts = [ cfg.StrOpt('db_backend', default='sqlalchemy', help='The backend to use for bare-metal database'), ] baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(db_opts, baremetal_group) IMPL = utils.LazyPluggable( 'db_backend', config_group='baremetal', sqlalchemy='nova.virt.baremetal.db.sqlalchemy.api') def bm_node_get_all(context, service_host=None): return IMPL.bm_node_get_all(context, service_host=service_host) def bm_node_get_associated(context, service_host=None): return IMPL.bm_node_get_associated(context, service_host=service_host) def bm_node_get_unassociated(context, service_host=None): return IMPL.bm_node_get_unassociated(context, service_host=service_host) def bm_node_find_free(context, service_host=None, memory_mb=None, cpus=None, local_gb=None): return IMPL.bm_node_find_free(context, service_host=service_host, memory_mb=memory_mb, cpus=cpus, local_gb=local_gb) def bm_node_get(context, bm_node_id): return IMPL.bm_node_get(context, bm_node_id) def bm_node_get_by_instance_uuid(context, instance_uuid): return IMPL.bm_node_get_by_instance_uuid(context, instance_uuid) def bm_node_get_by_node_uuid(context, node_uuid): return IMPL.bm_node_get_by_node_uuid(context, node_uuid) def bm_node_create(context, values): return IMPL.bm_node_create(context, values) def bm_node_destroy(context, bm_node_id): return IMPL.bm_node_destroy(context, bm_node_id) def bm_node_update(context, bm_node_id, values): return IMPL.bm_node_update(context, bm_node_id, values) def bm_node_associate_and_update(context, node_uuid, values): return IMPL.bm_node_associate_and_update(context, node_uuid, values) def bm_interface_get(context, if_id): return IMPL.bm_interface_get(context, if_id) def bm_interface_get_all(context): return IMPL.bm_interface_get_all(context) def bm_interface_destroy(context, if_id): return IMPL.bm_interface_destroy(context, if_id) def bm_interface_create(context, bm_node_id, address, datapath_id, port_no): return IMPL.bm_interface_create(context, bm_node_id, address, datapath_id, port_no) def bm_interface_set_vif_uuid(context, if_id, vif_uuid): return IMPL.bm_interface_set_vif_uuid(context, if_id, vif_uuid) def bm_interface_get_by_vif_uuid(context, vif_uuid): return IMPL.bm_interface_get_by_vif_uuid(context, vif_uuid) def bm_interface_get_all_by_bm_node_id(context, bm_node_id): return IMPL.bm_interface_get_all_by_bm_node_id(context, bm_node_id) nova-2014.1.5/nova/virt/baremetal/db/migration.py0000664000567000056700000000243212540642544022671 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
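# Typical invocations of this module (illustrative), mirroring the
# wrappers defined below:
#
#     from nova.virt.baremetal.db import migration
#     migration.db_sync()            # migrate to the most recent version
#     migration.db_sync(version=5)   # migrate up or down to version 5
#     current = migration.db_version()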
"""Database setup and migration commands.""" from nova import utils IMPL = utils.LazyPluggable( 'db_backend', config_group='baremetal', sqlalchemy='nova.virt.baremetal.db.sqlalchemy.migration') def db_sync(version=None): """Migrate the database to `version` or the most recent version.""" return IMPL.db_sync(version=version) def db_version(): """Display the current database version.""" return IMPL.db_version() def db_initial_version(): """The starting version for the database.""" return IMPL.db_initial_version() nova-2014.1.5/nova/virt/baremetal/iboot_pdu.py0000664000567000056700000001014612540642544022300 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # iBoot Power Driver from nova import context as nova_context from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import base iboot = importutils.try_import('iboot') LOG = logging.getLogger(__name__) class IBootManager(base.PowerManager): """iBoot Power Driver for Baremetal Nova Compute This PowerManager class provides a mechanism for controlling power state via an iBoot capable device (tested with an iBoot G2). 
Requires installation of python-iboot: https://github.com/darkip/python-iboot """ def __init__(self, **kwargs): node = kwargs.pop('node', {}) addr_relay = str(node['pm_address']).split(',') if len(addr_relay) > 1: try: self.relay_id = int(addr_relay[1]) except ValueError: msg = _("iboot PDU relay ID must be an integer.") raise exception.InvalidParameterValue(msg) else: self.relay_id = 1 addr_port = addr_relay[0].split(':') self.address = addr_port[0] if len(addr_port) > 1: try: self.port = int(addr_port[1]) except ValueError: msg = _("iboot PDU port must be an integer.") raise exception.InvalidParameterValue(msg) else: self.port = 9100 self.user = str(node['pm_user']) self.password = str(node['pm_password']) instance = kwargs.pop('instance', {}) self.node_name = instance.get('hostname', "") context = nova_context.get_admin_context() self.state = None self.conn = None def _create_connection(self): if not self.conn: self.conn = iboot.iBootInterface(self.address, self.user, self.password, port=self.port, num_relays=self.relay_id) return self.conn def _switch(self, relay_id, enabled): return self.conn.switch(relay_id, enabled) def _get_relay(self, relay_id): return self.conn.get_relays()[relay_id - 1] def activate_node(self): LOG.info(_("activate_node name %s"), self.node_name) self._create_connection() self._switch(self.relay_id, True) if self.is_power_on(): self.state = baremetal_states.ACTIVE else: self.state = baremetal_states.ERROR return self.state def reboot_node(self): LOG.info(_("reboot_node: %s"), self.node_name) self._create_connection() self._switch(self.relay_id, False) self._switch(self.relay_id, True) if self.is_power_on(): self.state = baremetal_states.ACTIVE else: self.state = baremetal_states.ERROR return self.state def deactivate_node(self): LOG.info(_("deactivate_node name %s"), self.node_name) self._create_connection() if self.is_power_on(): self._switch(self.relay_id, False) if self.is_power_on(): self.state = baremetal_states.ERROR else: self.state = baremetal_states.DELETED return self.state def is_power_on(self): LOG.debug(_("Checking if %s is running"), self.node_name) self._create_connection() return self._get_relay(self.relay_id) nova-2014.1.5/nova/virt/baremetal/net-dhcp.ubuntu.template0000664000567000056700000000062212540642544024520 0ustar jenkinsjenkins00000000000000# Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). 
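# Example rendered output for a single interface named 'eth0' with
# use_ipv6 disabled (illustrative only):
#
#   auto eth0
#   iface eth0 inet dhcp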
# The loopback network interface auto lo iface lo inet loopback {% for ifc in interfaces -%} auto {{ ifc.name }} iface {{ ifc.name }} inet dhcp {% if use_ipv6 -%} iface {{ ifc.name }} inet6 dhcp {%- endif %} {%- endfor %} nova-2014.1.5/nova/virt/baremetal/pxe_config.template0000664000567000056700000000114212540642544023614 0ustar jenkinsjenkins00000000000000default deploy label deploy kernel {{ pxe_options.deployment_aki_path }} append initrd={{ pxe_options.deployment_ari_path }} selinux=0 disk=cciss/c0d0,sda,hda,vda iscsi_target_iqn={{ pxe_options.deployment_iscsi_iqn }} deployment_id={{ pxe_options.deployment_id }} deployment_key={{ pxe_options.deployment_key }} troubleshoot=0 {{ pxe_options.pxe_append_params|default("", true) }} ipappend 3 label boot kernel {{ pxe_options.aki_path }} append initrd={{ pxe_options.ari_path }} root={{ ROOT }} ro {{ pxe_options.pxe_append_params|default("", true) }} {{ pxe_options.pxe_network_config|default("", true) }} nova-2014.1.5/nova/virt/baremetal/tilera_pdu.py0000664000567000056700000001334112540642544022444 0ustar jenkinsjenkins00000000000000# coding=utf-8 # Copyright (c) 2011-2013 University of Southern California / ISI # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Baremetal PDU power manager. """ import time from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import utils from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import base opts = [ cfg.StrOpt('tile_pdu_ip', default='10.0.100.1', help='IP address of tilera pdu'), cfg.StrOpt('tile_pdu_mgr', default='/tftpboot/pdu_mgr', help='Management script for tilera pdu'), cfg.IntOpt('tile_pdu_off', default=2, help='Power status of tilera PDU is OFF'), cfg.IntOpt('tile_pdu_on', default=1, help='Power status of tilera PDU is ON'), cfg.IntOpt('tile_pdu_status', default=9, help='Power status of tilera PDU'), cfg.IntOpt('tile_power_wait', default=9, help='Wait time in seconds before checking the result ' 'after tilera power operations'), ] baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(opts, baremetal_group) LOG = logging.getLogger(__name__) class Pdu(base.PowerManager): """PDU Power Driver for Baremetal Nova Compute This PowerManager class provides a mechanism for controlling the power state of physical hardware via PDU calls.
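    A hypothetical node record illustrating the fields read by __init__
    below (values are examples only)::

        node = {'id': 1, 'pm_address': '10.0.100.1', 'pm_user': 'admin',
                'pm_password': 'secret', 'terminal_port': 8000}
        pm = Pdu(node)
        pm.activate_node()   # returns baremetal_states.ACTIVE on success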
""" def __init__(self, node, **kwargs): self.state = None self.retries = None self.node_id = node['id'] self.address = node['pm_address'] self.user = node['pm_user'] self.password = node['pm_password'] self.port = node['terminal_port'] if self.node_id == None: raise exception.InvalidParameterValue(_("Node id not supplied " "to PDU")) if self.address == None: raise exception.InvalidParameterValue(_("Address not supplied " "to PDU")) if self.user == None: raise exception.InvalidParameterValue(_("User not supplied " "to PDU")) if self.password == None: raise exception.InvalidParameterValue(_("Password not supplied " "to PDU")) def _exec_pdutool(self, mode): """Changes power state of the given node. According to the mode (1-ON, 2-OFF, 3-REBOOT), power state can be changed. /tftpboot/pdu_mgr script handles power management of PDU (Power Distribution Unit). """ if mode == CONF.baremetal.tile_pdu_status: try: utils.execute('ping', '-c1', self.address, check_exit_code=True) return CONF.baremetal.tile_pdu_on except processutils.ProcessExecutionError: return CONF.baremetal.tile_pdu_off else: try: utils.execute(CONF.baremetal.tile_pdu_mgr, CONF.baremetal.tile_pdu_ip, mode) time.sleep(CONF.baremetal.tile_power_wait) return mode except processutils.ProcessExecutionError: LOG.exception(_("PDU failed")) def _is_power(self, state): out_err = self._exec_pdutool(CONF.baremetal.tile_pdu_status) return out_err == state def _power_on(self): """Turn the power to this node ON.""" try: self._exec_pdutool(CONF.baremetal.tile_pdu_on) if self._is_power(CONF.baremetal.tile_pdu_on): self.state = baremetal_states.ACTIVE else: self.state = baremetal_states.ERROR except Exception: self.state = baremetal_states.ERROR LOG.exception(_("PDU power on failed")) def _power_off(self): """Turn the power to this node OFF.""" try: self._exec_pdutool(CONF.baremetal.tile_pdu_off) if self._is_power(CONF.baremetal.tile_pdu_off): self.state = baremetal_states.DELETED else: self.state = baremetal_states.ERROR except Exception: self.state = baremetal_states.ERROR LOG.exception(_("PDU power off failed")) def activate_node(self): """Turns the power to node ON.""" if (self._is_power(CONF.baremetal.tile_pdu_on) and self.state == baremetal_states.ACTIVE): LOG.warning(_("Activate node called, but node %s " "is already active") % self.address) self._power_on() return self.state def reboot_node(self): """Cycles the power to a node.""" self._power_off() self._power_on() return self.state def deactivate_node(self): """Turns the power to node OFF, regardless of current state.""" self._power_off() return self.state def is_power_on(self): return self._is_power(CONF.baremetal.tile_pdu_on) nova-2014.1.5/nova/virt/baremetal/base.py0000664000567000056700000000500712540642544021226 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright (c) 2011 University of Southern California / ISI # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova.virt.baremetal import baremetal_states class NodeDriver(object): def __init__(self, virtapi): self.virtapi = virtapi def cache_images(self, context, node, instance, **kwargs): raise NotImplementedError() def destroy_images(self, context, node, instance): raise NotImplementedError() def activate_bootloader(self, context, node, instance, **kwargs): raise NotImplementedError() def deactivate_bootloader(self, context, node, instance): raise NotImplementedError() def activate_node(self, context, node, instance): """For operations after power on.""" raise NotImplementedError() def deactivate_node(self, context, node, instance): """For operations before power off.""" raise NotImplementedError() def get_console_output(self, node, instance): raise NotImplementedError() def dhcp_options_for_instance(self, instance): """Optional override to return the DHCP options to use for instance. If no DHCP options are needed, this should not be overridden or None should be returned. """ return None class PowerManager(object): def __init__(self, **kwargs): self.state = baremetal_states.DELETED def activate_node(self): self.state = baremetal_states.ACTIVE return self.state def reboot_node(self): self.state = baremetal_states.ACTIVE return self.state def deactivate_node(self): self.state = baremetal_states.DELETED return self.state def is_power_on(self): """Returns True or False according to the node's power state.""" return True # TODO(NTTdocomo): split out console methods to its own class def start_console(self): pass def stop_console(self): pass nova-2014.1.5/nova/virt/baremetal/driver.py0000664000567000056700000006056412540642544021610 0ustar jenkinsjenkins00000000000000# coding=utf-8 # # Copyright (c) 2012 NTT DOCOMO, INC # Copyright (c) 2011 University of Southern California / ISI # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A driver for the Bare-metal platform. """ from oslo.config import cfg from nova.compute import flavors from nova.compute import power_state from nova.compute import task_states from nova import context as nova_context from nova import exception from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import db from nova.virt import driver from nova.virt import firewall from nova.virt.libvirt import imagecache LOG = logging.getLogger(__name__) opts = [ cfg.StrOpt('vif_driver', default='nova.virt.baremetal.vif_driver.BareMetalVIFDriver', help='Baremetal VIF driver.'), cfg.StrOpt('volume_driver', default='nova.virt.baremetal.volume_driver.LibvirtVolumeDriver', help='Baremetal volume driver.'), cfg.ListOpt('flavor_extra_specs', default=[], help='A list of additional capabilities corresponding to ' 'flavor_extra_specs for this compute ' 'host to advertise.
Valid entries are name:value pairs, ' 'for example, "key1:val1, key2:val2"', deprecated_name='instance_type_extra_specs'), cfg.StrOpt('driver', default='nova.virt.baremetal.pxe.PXE', help='Baremetal driver back-end (pxe or tilera)'), cfg.StrOpt('power_manager', default='nova.virt.baremetal.ipmi.IPMI', help='Baremetal power management method'), cfg.StrOpt('tftp_root', default='/tftpboot', help='Baremetal compute node\'s tftp root path'), ] baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(opts, baremetal_group) CONF.import_opt('host', 'nova.netconf') CONF.import_opt('my_ip', 'nova.netconf') DEFAULT_FIREWALL_DRIVER = "%s.%s" % ( firewall.__name__, firewall.NoopFirewallDriver.__name__) def _get_baremetal_node_by_instance_uuid(instance_uuid): ctx = nova_context.get_admin_context() node = db.bm_node_get_by_instance_uuid(ctx, instance_uuid) if node['service_host'] != CONF.host: LOG.error(_("Request for baremetal node %s " "sent to wrong service host") % instance_uuid) raise exception.InstanceNotFound(instance_id=instance_uuid) return node def _update_state(context, node, instance, state): """Update the node state in the baremetal DB. If instance is not supplied, reset the instance_uuid field for this node. """ values = {'task_state': state} if not instance: values['instance_uuid'] = None values['instance_name'] = None db.bm_node_update(context, node['id'], values) def get_power_manager(**kwargs): cls = importutils.import_class(CONF.baremetal.power_manager) return cls(**kwargs) class BareMetalDriver(driver.ComputeDriver): """BareMetal hypervisor driver.""" capabilities = { "has_imagecache": True, "supports_recreate": False, } def __init__(self, virtapi, read_only=False): super(BareMetalDriver, self).__init__(virtapi) self.driver = importutils.import_object( CONF.baremetal.driver, virtapi) self.vif_driver = importutils.import_object( CONF.baremetal.vif_driver) self.firewall_driver = firewall.load_driver( default=DEFAULT_FIREWALL_DRIVER) self.volume_driver = importutils.import_object( CONF.baremetal.volume_driver, virtapi) self.image_cache_manager = imagecache.ImageCacheManager() extra_specs = {} extra_specs["baremetal_driver"] = CONF.baremetal.driver for pair in CONF.baremetal.flavor_extra_specs: keyval = pair.split(':', 1) keyval[0] = keyval[0].strip() keyval[1] = keyval[1].strip() extra_specs[keyval[0]] = keyval[1] if 'cpu_arch' not in extra_specs: LOG.warning( _('cpu_arch is not found in flavor_extra_specs')) extra_specs['cpu_arch'] = '' self.extra_specs = extra_specs self.supported_instances = [ (extra_specs['cpu_arch'], 'baremetal', 'baremetal'), ] @classmethod def instance(cls): if not hasattr(cls, '_instance'): cls._instance = cls() return cls._instance def init_host(self, host): return def get_hypervisor_type(self): return 'baremetal' def get_hypervisor_version(self): # TODO(deva): define the version properly elsewhere return 1 def list_instances(self): l = [] context = nova_context.get_admin_context() for node in db.bm_node_get_associated(context, service_host=CONF.host): l.append(node['instance_name']) return l def _require_node(self, instance): """Get a node's uuid out of a manager instance dict. The compute manager is meant to know the node uuid, so a missing uuid is a significant issue - it may mean we've been passed someone else's data.
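        For example (illustrative only; the values are hypothetical)::

            instance = {'uuid': 'inst-uuid', 'node': 'node-uuid'}
            node_uuid = self._require_node(instance)   # -> 'node-uuid'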
""" node_uuid = instance.get('node') if not node_uuid: raise exception.NovaException(_( "Baremetal node id not supplied to driver for %r") % instance['uuid']) return node_uuid def _attach_block_devices(self, instance, block_device_info): block_device_mapping = driver.\ block_device_info_get_mapping(block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] mountpoint = vol['mount_device'] self.attach_volume(None, connection_info, instance, mountpoint) def _detach_block_devices(self, instance, block_device_info): block_device_mapping = driver.\ block_device_info_get_mapping(block_device_info) for vol in block_device_mapping: connection_info = vol['connection_info'] mountpoint = vol['mount_device'] self.detach_volume( connection_info, instance, mountpoint) def _start_firewall(self, instance, network_info): self.firewall_driver.setup_basic_filtering( instance, network_info) self.firewall_driver.prepare_instance_filter( instance, network_info) self.firewall_driver.apply_instance_filter( instance, network_info) def _stop_firewall(self, instance, network_info): self.firewall_driver.unfilter_instance( instance, network_info) def macs_for_instance(self, instance): context = nova_context.get_admin_context() node_uuid = self._require_node(instance) node = db.bm_node_get_by_node_uuid(context, node_uuid) ifaces = db.bm_interface_get_all_by_bm_node_id(context, node['id']) return set(iface['address'] for iface in ifaces) def _set_default_ephemeral_device(self, instance): flavor = flavors.extract_flavor(instance) if flavor['ephemeral_gb']: self.virtapi.instance_update( nova_context.get_admin_context(), instance['uuid'], {'default_ephemeral_device': '/dev/sda1'}) def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): node_uuid = self._require_node(instance) self._set_default_ephemeral_device(instance) # NOTE(deva): this db method will raise an exception if the node is # already in use. We call it here to ensure no one else # allocates this node before we begin provisioning it. node = db.bm_node_associate_and_update(context, node_uuid, {'instance_uuid': instance['uuid'], 'instance_name': instance['hostname'], 'task_state': baremetal_states.BUILDING, 'preserve_ephemeral': False}) self._spawn(node, context, instance, image_meta, injected_files, admin_password, network_info=network_info, block_device_info=block_device_info) def _spawn(self, node, context, instance, image_meta, injected_files, admin_password, network_info=None, block_device_info=None): try: self._plug_vifs(instance, network_info, context=context) self._attach_block_devices(instance, block_device_info) self._start_firewall(instance, network_info) # Caching images is both CPU and I/O expensive. When running many # machines from a single nova-compute server, deploys of multiple # machines can easily thrash the nova-compute server - unlike a # virt hypervisor which is limited by CPU for VMs, baremetal only # uses CPU and I/O when deploying. By only downloading one image # at a time we serialise rather than thrashing, which leads to a # lower average time-to-complete during overload situations, and # a (relatively) insignificant delay for compute servers which # have sufficient IOPS to handle multiple concurrent image # conversions. 
with lockutils.lock('nova-baremetal-cache-images', external=True): self.driver.cache_images( context, node, instance, admin_password=admin_password, image_meta=image_meta, injected_files=injected_files, network_info=network_info, ) self.driver.activate_bootloader(context, node, instance, network_info=network_info) # NOTE(deva): ensure node is really off before we turn it on # fixes bug https://code.launchpad.net/bugs/1178919 self.power_off(instance, node) self.power_on(context, instance, network_info, block_device_info, node) _update_state(context, node, instance, baremetal_states.PREPARED) self.driver.activate_node(context, node, instance) _update_state(context, node, instance, baremetal_states.ACTIVE) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_("Error deploying instance %(instance)s " "on baremetal node %(node)s.") % {'instance': instance['uuid'], 'node': node['uuid']}) # Do not set instance=None yet. This prevents another # spawn() while we are cleaning up. _update_state(context, node, instance, baremetal_states.ERROR) self.driver.deactivate_node(context, node, instance) self.power_off(instance, node) self.driver.deactivate_bootloader(context, node, instance) self.driver.destroy_images(context, node, instance) self._detach_block_devices(instance, block_device_info) self._stop_firewall(instance, network_info) self._unplug_vifs(instance, network_info) _update_state(context, node, None, baremetal_states.DELETED) else: # We no longer need the image since we successfully deployed. self.driver.destroy_images(context, node, instance) def rebuild(self, context, instance, image_meta, injected_files, admin_password, bdms, detach_block_devices, attach_block_devices, network_info=None, recreate=False, block_device_info=None, preserve_ephemeral=False): """Destroy and re-make this instance. A 'rebuild' effectively purges all existing data from the system and remakes the VM with given 'metadata' and 'personalities'. :param context: Security context. :param instance: Instance object. :param image_meta: Image object returned by nova.image.glance that defines the image from which to boot this instance. :param injected_files: User files to inject into instance. :param admin_password: Administrator password to set in instance. :param bdms: block-device-mappings to use for rebuild :param detach_block_devices: function to detach block devices. See nova.compute.manager.ComputeManager:_rebuild_default_impl for usage. :param attach_block_devices: function to attach block devices. See nova.compute.manager.ComputeManager:_rebuild_default_impl for usage. :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param block_device_info: Information about block devices to be attached to the instance. :param recreate: True if instance should be recreated with same disk. :param preserve_ephemeral: True if the default ephemeral storage partition must be preserved on rebuild. 
""" instance.task_state = task_states.REBUILD_SPAWNING instance.save(expected_task_state=[task_states.REBUILDING]) node_uuid = self._require_node(instance) node = db.bm_node_get_by_node_uuid(context, node_uuid) db.bm_node_update( context, node['id'], {'task_state': baremetal_states.BUILDING, 'preserve_ephemeral': preserve_ephemeral} ) self._spawn(node, context, instance, image_meta, injected_files, admin_password, network_info=network_info, block_device_info=block_device_info) def reboot(self, context, instance, network_info, reboot_type, block_device_info=None, bad_volumes_callback=None): node = _get_baremetal_node_by_instance_uuid(instance['uuid']) ctx = nova_context.get_admin_context() pm = get_power_manager(node=node, instance=instance) state = pm.reboot_node() if pm.state != baremetal_states.ACTIVE: raise exception.InstanceRebootFailure(_( "Baremetal power manager failed to restart node " "for instance %r") % instance['uuid']) _update_state(ctx, node, instance, state) def destroy(self, context, instance, network_info, block_device_info=None): context = nova_context.get_admin_context() try: node = _get_baremetal_node_by_instance_uuid(instance['uuid']) except exception.InstanceNotFound: LOG.warning(_("Destroy called on non-existing instance %s") % instance['uuid']) return try: self.driver.deactivate_node(context, node, instance) self.power_off(instance, node) self.driver.deactivate_bootloader(context, node, instance) self.driver.destroy_images(context, node, instance) self._detach_block_devices(instance, block_device_info) self._stop_firewall(instance, network_info) self._unplug_vifs(instance, network_info) _update_state(context, node, None, baremetal_states.DELETED) except Exception as e: with excutils.save_and_reraise_exception(): try: LOG.error(_("Error from baremetal driver " "during destroy: %s") % e) _update_state(context, node, instance, baremetal_states.ERROR) except Exception: LOG.error(_("Error while recording destroy failure in " "baremetal database: %s") % e) def cleanup(self, context, instance, network_info, block_device_info=None, destroy_disks=True): """Cleanup after instance being destroyed.""" pass def power_off(self, instance, node=None): """Power off the specified instance.""" if not node: node = _get_baremetal_node_by_instance_uuid(instance['uuid']) pm = get_power_manager(node=node, instance=instance) pm.deactivate_node() if pm.state != baremetal_states.DELETED: raise exception.InstancePowerOffFailure(_( "Baremetal power manager failed to stop node " "for instance %r") % instance['uuid']) pm.stop_console() def power_on(self, context, instance, network_info, block_device_info=None, node=None): """Power on the specified instance.""" if not node: node = _get_baremetal_node_by_instance_uuid(instance['uuid']) pm = get_power_manager(node=node, instance=instance) pm.activate_node() if pm.state != baremetal_states.ACTIVE: raise exception.InstancePowerOnFailure(_( "Baremetal power manager failed to start node " "for instance %r") % instance['uuid']) pm.start_console() def get_volume_connector(self, instance): return self.volume_driver.get_volume_connector(instance) def attach_volume(self, context, connection_info, instance, mountpoint, disk_bus=None, device_type=None, encryption=None): return self.volume_driver.attach_volume(connection_info, instance, mountpoint) def detach_volume(self, connection_info, instance, mountpoint, encryption=None): return self.volume_driver.detach_volume(connection_info, instance, mountpoint) def get_info(self, instance): inst_uuid = 
instance.get('uuid') node = _get_baremetal_node_by_instance_uuid(inst_uuid) pm = get_power_manager(node=node, instance=instance) # NOTE(deva): Power manager may not be able to determine power state # in which case it may return "None" here. ps = pm.is_power_on() if ps: pstate = power_state.RUNNING elif ps is False: pstate = power_state.SHUTDOWN else: pstate = power_state.NOSTATE return {'state': pstate, 'max_mem': node['memory_mb'], 'mem': node['memory_mb'], 'num_cpu': node['cpus'], 'cpu_time': 0} def refresh_security_group_rules(self, security_group_id): self.firewall_driver.refresh_security_group_rules(security_group_id) return True def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) return True def refresh_provider_fw_rules(self): self.firewall_driver.refresh_provider_fw_rules() def _node_resource(self, node): vcpus_used = 0 memory_mb_used = 0 local_gb_used = 0 vcpus = node['cpus'] memory_mb = node['memory_mb'] local_gb = node['local_gb'] if node['instance_uuid']: vcpus_used = node['cpus'] memory_mb_used = node['memory_mb'] local_gb_used = node['local_gb'] dic = {'vcpus': vcpus, 'memory_mb': memory_mb, 'local_gb': local_gb, 'vcpus_used': vcpus_used, 'memory_mb_used': memory_mb_used, 'local_gb_used': local_gb_used, 'hypervisor_type': self.get_hypervisor_type(), 'hypervisor_version': self.get_hypervisor_version(), 'hypervisor_hostname': str(node['uuid']), 'cpu_info': 'baremetal cpu', 'supported_instances': jsonutils.dumps(self.supported_instances), 'stats': jsonutils.dumps(self.extra_specs) } return dic def refresh_instance_security_rules(self, instance): self.firewall_driver.refresh_instance_security_rules(instance) def get_available_resource(self, nodename): context = nova_context.get_admin_context() resource = {} try: node = db.bm_node_get_by_node_uuid(context, nodename) resource = self._node_resource(node) except exception.NodeNotFoundByUUID: pass return resource def ensure_filtering_rules_for_instance(self, instance_ref, network_info): self.firewall_driver.setup_basic_filtering(instance_ref, network_info) self.firewall_driver.prepare_instance_filter(instance_ref, network_info) def unfilter_instance(self, instance_ref, network_info): self.firewall_driver.unfilter_instance(instance_ref, network_info=network_info) def get_host_stats(self, refresh=False): caps = [] context = nova_context.get_admin_context() nodes = db.bm_node_get_all(context, service_host=CONF.host) for node in nodes: res = self._node_resource(node) nodename = str(node['uuid']) data = {} data['vcpus'] = res['vcpus'] data['vcpus_used'] = res['vcpus_used'] data['cpu_info'] = res['cpu_info'] data['disk_total'] = res['local_gb'] data['disk_used'] = res['local_gb_used'] data['disk_available'] = res['local_gb'] - res['local_gb_used'] data['host_memory_total'] = res['memory_mb'] data['host_memory_free'] = res['memory_mb'] - res['memory_mb_used'] data['hypervisor_type'] = res['hypervisor_type'] data['hypervisor_version'] = res['hypervisor_version'] data['hypervisor_hostname'] = nodename data['supported_instances'] = self.supported_instances data.update(self.extra_specs) data['host'] = CONF.host data['node'] = nodename # TODO(NTTdocomo): put node's extra specs here caps.append(data) return caps def plug_vifs(self, instance, network_info): """Plugin VIFs into networks.""" self._plug_vifs(instance, network_info) def _plug_vifs(self, instance, network_info, context=None): if not context: context = nova_context.get_admin_context() node = 
_get_baremetal_node_by_instance_uuid(instance['uuid']) if node: pifs = db.bm_interface_get_all_by_bm_node_id(context, node['id']) for pif in pifs: if pif['vif_uuid']: db.bm_interface_set_vif_uuid(context, pif['id'], None) for vif in network_info: self.vif_driver.plug(instance, vif) def _unplug_vifs(self, instance, network_info): for vif in network_info: self.vif_driver.unplug(instance, vif) def manage_image_cache(self, context, all_instances): """Manage the local cache of images.""" self.image_cache_manager.update(context, all_instances) def get_console_output(self, context, instance): node = _get_baremetal_node_by_instance_uuid(instance.uuid) return self.driver.get_console_output(node, instance) def get_available_nodes(self, refresh=False): context = nova_context.get_admin_context() return [str(n['uuid']) for n in db.bm_node_get_all(context, service_host=CONF.host)] def dhcp_options_for_instance(self, instance): return self.driver.dhcp_options_for_instance(instance) nova-2014.1.5/nova/virt/baremetal/volume_driver.py0000664000567000056700000002444212540642544023202 0ustar jenkinsjenkins00000000000000# coding=utf-8 # Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from oslo.config import cfg from nova import context as nova_context from nova import exception from nova import network from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova import utils from nova.virt.baremetal import db as bmdb from nova.virt import volumeutils opts = [ cfg.BoolOpt('use_unsafe_iscsi', default=False, help='Do not set this out of dev/test environments. 
' 'If a node does not have a fixed PXE IP address, ' 'volumes are exported with globally opened ACL'), cfg.StrOpt('iscsi_iqn_prefix', default='iqn.2010-10.org.openstack.baremetal', help='The iSCSI IQN prefix used in baremetal volume ' 'connections.'), ] baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(opts, baremetal_group) CONF.import_opt('host', 'nova.netconf') CONF.import_opt('use_ipv6', 'nova.netconf') CONF.import_opt('volume_drivers', 'nova.virt.libvirt.driver', group='libvirt') LOG = logging.getLogger(__name__) def _get_baremetal_node_by_instance_uuid(instance_uuid): context = nova_context.get_admin_context() return bmdb.bm_node_get_by_instance_uuid(context, instance_uuid) def _create_iscsi_export_tgtadm(path, tid, iqn): utils.execute('tgtadm', '--lld', 'iscsi', '--mode', 'target', '--op', 'new', '--tid', tid, '--targetname', iqn, run_as_root=True) utils.execute('tgtadm', '--lld', 'iscsi', '--mode', 'logicalunit', '--op', 'new', '--tid', tid, '--lun', '1', '--backing-store', path, run_as_root=True) def _allow_iscsi_tgtadm(tid, address): utils.execute('tgtadm', '--lld', 'iscsi', '--mode', 'target', '--op', 'bind', '--tid', tid, '--initiator-address', address, run_as_root=True) def _delete_iscsi_export_tgtadm(tid): try: utils.execute('tgtadm', '--lld', 'iscsi', '--mode', 'logicalunit', '--op', 'delete', '--tid', tid, '--lun', '1', run_as_root=True) except processutils.ProcessExecutionError: pass try: utils.execute('tgtadm', '--lld', 'iscsi', '--mode', 'target', '--op', 'delete', '--tid', tid, run_as_root=True) except processutils.ProcessExecutionError: pass # Check that the tid was deleted, that is, that it no longer exists. # If the tid does not exist, tgtadm exits with code 22. # utils.execute() can check the exit code when the check_exit_code # parameter is passed, but regardless of whether check_exit_code # contains 0 or not, it does not report an error when the exit code is # 0. So we have to catch a ProcessExecutionError and test that its # exit_code is 22.
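    # Equivalently (illustrative only, and assuming the execute() wrapper
    # in use accepts a list of permitted exit codes), a still-existing
    # target (exit code 0) would raise, while the expected "not found"
    # result (22) would pass:
    #
    #     utils.execute('tgtadm', '--lld', 'iscsi', '--mode', 'target',
    #                   '--op', 'show', '--tid', tid,
    #                   check_exit_code=[22], run_as_root=True)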
try: utils.execute('tgtadm', '--lld', 'iscsi', '--mode', 'target', '--op', 'show', '--tid', tid, run_as_root=True) except processutils.ProcessExecutionError as e: if e.exit_code == 22: # OK, the tid is deleted return raise raise exception.NovaException(_( 'baremetal driver was unable to delete tid %s') % tid) def _show_tgtadm(): out, _ = utils.execute('tgtadm', '--lld', 'iscsi', '--mode', 'target', '--op', 'show', run_as_root=True) return out def _list_backingstore_path(): out = _show_tgtadm() l = [] for line in out.split('\n'): m = re.search(r'Backing store path: (.*)$', line) if m: if '/' in m.group(1): l.append(m.group(1)) return l def _get_next_tid(): out = _show_tgtadm() last_tid = 0 for line in out.split('\n'): m = re.search(r'^Target (\d+):', line) if m: tid = int(m.group(1)) if last_tid < tid: last_tid = tid return last_tid + 1 def _find_tid(iqn): out = _show_tgtadm() pattern = r'^Target (\d+): *' + re.escape(iqn) for line in out.split('\n'): m = re.search(pattern, line) if m: return int(m.group(1)) return None def _get_iqn(instance_name, mountpoint): mp = mountpoint.replace('/', '-').strip('-') iqn = '%s:%s-%s' % (CONF.baremetal.iscsi_iqn_prefix, instance_name, mp) return iqn def _get_fixed_ips(instance): context = nova_context.get_admin_context() nw_info = network.API().get_instance_nw_info(context, instance) ips = nw_info.fixed_ips() return ips class VolumeDriver(object): def __init__(self, virtapi): super(VolumeDriver, self).__init__() self.virtapi = virtapi self._initiator = None def get_volume_connector(self, instance): if not self._initiator: self._initiator = volumeutils.get_iscsi_initiator() if not self._initiator: LOG.warn(_('Could not determine iscsi initiator name'), instance=instance) return { 'ip': CONF.my_ip, 'initiator': self._initiator, 'host': CONF.host, } def attach_volume(self, connection_info, instance, mountpoint): raise NotImplementedError() def detach_volume(self, connection_info, instance, mountpoint): raise NotImplementedError() class LibvirtVolumeDriver(VolumeDriver): """The VolumeDriver delegates to nova.virt.libvirt.volume.""" def __init__(self, virtapi): super(LibvirtVolumeDriver, self).__init__(virtapi) self.volume_drivers = {} for driver_str in CONF.libvirt.volume_drivers: driver_type, _sep, driver = driver_str.partition('=') driver_class = importutils.import_class(driver) self.volume_drivers[driver_type] = driver_class(self) def _volume_driver_method(self, method_name, connection_info, *args, **kwargs): driver_type = connection_info.get('driver_volume_type') if driver_type not in self.volume_drivers: raise exception.VolumeDriverNotFound(driver_type=driver_type) driver = self.volume_drivers[driver_type] method = getattr(driver, method_name) return method(connection_info, *args, **kwargs) def attach_volume(self, connection_info, instance, mountpoint): fixed_ips = _get_fixed_ips(instance) if not fixed_ips: if not CONF.baremetal.use_unsafe_iscsi: raise exception.NovaException(_( 'No fixed PXE IP is associated to %s') % instance['uuid']) mount_device = mountpoint.rpartition("/")[2] disk_info = { 'dev': mount_device, 'bus': 'baremetal', 'type': 'baremetal', } conf = self._connect_volume(connection_info, disk_info) self._publish_iscsi(instance, mountpoint, fixed_ips, conf.source_path) def _connect_volume(self, connection_info, disk_info): return self._volume_driver_method('connect_volume', connection_info, disk_info) def _publish_iscsi(self, instance, mountpoint, fixed_ips, device_path): iqn = _get_iqn(instance['name'], mountpoint) tid = _get_next_tid() 
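        # For example, with the default iscsi_iqn_prefix, instance name
        # 'instance-0001' and mountpoint '/dev/sdb', _get_iqn() above
        # yields:
        #
        #     iqn.2010-10.org.openstack.baremetal:instance-0001-dev-sdb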
_create_iscsi_export_tgtadm(device_path, tid, iqn) if fixed_ips: for ip in fixed_ips: _allow_iscsi_tgtadm(tid, ip['address']) else: # NOTE(NTTdocomo): Since nova-compute does not know the # instance's initiator ip, it allows any initiators # to connect to the volume. This means other bare-metal # instances that are not attached to the volume can connect # to the volume. Do not set CONF.baremetal.use_unsafe_iscsi # outside of dev/test environments. # TODO(NTTdocomo): support CHAP _allow_iscsi_tgtadm(tid, 'ALL') def detach_volume(self, connection_info, instance, mountpoint): mount_device = mountpoint.rpartition("/")[2] try: self._depublish_iscsi(instance, mountpoint) finally: self._disconnect_volume(connection_info, mount_device) def _disconnect_volume(self, connection_info, disk_dev): return self._volume_driver_method('disconnect_volume', connection_info, disk_dev) def _depublish_iscsi(self, instance, mountpoint): iqn = _get_iqn(instance['name'], mountpoint) tid = _find_tid(iqn) if tid is not None: _delete_iscsi_export_tgtadm(tid) else: LOG.warn(_('detach volume could not find tid for %s'), iqn, instance=instance) def get_all_block_devices(self): """Return all block devices in use on this node.""" return _list_backingstore_path() def get_hypervisor_version(self): """A dummy method for LibvirtBaseVolumeDriver.connect_volume.""" return 1 nova-2014.1.5/nova/virt/baremetal/fake.py0000664000567000056700000000435512540642544021227 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright (c) 2011 University of Southern California / ISI # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
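# These no-op implementations are meant for testing and can be selected
# through configuration, e.g. (illustrative):
#
#     [baremetal]
#     driver = nova.virt.baremetal.fake.FakeDriver
#     power_manager = nova.virt.baremetal.fake.FakePowerManager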
from nova.virt.baremetal import base from nova.virt import firewall class FakeDriver(base.NodeDriver): def cache_images(self, context, node, instance, **kwargs): pass def destroy_images(self, context, node, instance): pass def activate_bootloader(self, context, node, instance, **kwargs): pass def deactivate_bootloader(self, context, node, instance): pass def activate_node(self, context, node, instance): """For operations after power on.""" pass def deactivate_node(self, context, node, instance): """For operations before power off.""" pass def get_console_output(self, node, instance): return 'fake\nconsole\noutput for instance %s' % instance.id class FakePowerManager(base.PowerManager): def __init__(self, **kwargs): super(FakePowerManager, self).__init__(**kwargs) class FakeFirewallDriver(firewall.NoopFirewallDriver): def __init__(self): super(FakeFirewallDriver, self).__init__() class FakeVifDriver(object): def __init__(self): super(FakeVifDriver, self).__init__() def plug(self, instance, vif): pass def unplug(self, instance, vif): pass class FakeVolumeDriver(object): def __init__(self, virtapi): super(FakeVolumeDriver, self).__init__() self.virtapi = virtapi self._initiator = "fake_initiator" def attach_volume(self, connection_info, instance, mountpoint): pass def detach_volume(self, connection_info, instance, mountpoint): pass nova-2014.1.5/nova/virt/baremetal/virtual_power_driver.py0000664000567000056700000002022212540642544024565 0ustar jenkinsjenkins00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
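# fake.py above supplies no-op stand-ins for every pluggable baremetal
# interface (node driver, power manager, firewall, VIF and volume drivers),
# so the compute driver can be exercised without real hardware. A minimal
# usage sketch, assuming an in-tree checkout; the node and instance
# fixtures below are hypothetical test data.
from nova.virt.baremetal import fake


class _FakeInstance(object):
    id = 42
    uuid = 'fake-uuid'


_driver = fake.FakeDriver(None)  # NodeDriver takes a virtapi argument;
                                 # the fake implementation never touches it
_node = {'id': 1}
_instance = _FakeInstance()

_driver.cache_images(None, _node, _instance)         # no-op in the fake
_driver.activate_bootloader(None, _node, _instance)  # no-op in the fake
_driver.activate_node(None, _node, _instance)        # "after power on" hook
print(_driver.get_console_output(_node, _instance))  # canned console text
_driver.deactivate_node(None, _node, _instance)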
# # Virtual power driver from oslo.config import cfg from nova import context as nova_context from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import base from nova.virt.baremetal import common as connection from nova.virt.baremetal import db opts = [ cfg.StrOpt('virtual_power_ssh_host', default='', help='IP or name to virtual power host'), cfg.IntOpt('virtual_power_ssh_port', default=22, help='Port to use for ssh to virtual power host'), cfg.StrOpt('virtual_power_type', default='virsh', help='Base command to use for virtual power(vbox, virsh)'), cfg.StrOpt('virtual_power_host_user', default='', help='User to execute virtual power commands as'), cfg.StrOpt('virtual_power_host_pass', default='', help='Password for virtual power host_user'), cfg.StrOpt('virtual_power_host_key', help='The ssh key for virtual power host_user'), ] baremetal_vp = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_vp) CONF.register_opts(opts, baremetal_vp) _conn = None _vp_cmd = None _cmds = None LOG = logging.getLogger(__name__) def _normalize_mac(mac): return mac.replace(':', '').lower() class VirtualPowerManager(base.PowerManager): """Virtual Power Driver for Baremetal Nova Compute This PowerManager class provides mechanism for controlling the power state of VMs based on their name and MAC address. It uses ssh to connect to the VM's host and issue commands. Node will be matched based on mac address NOTE: for use in dev/test environments only! """ def __init__(self, **kwargs): global _conn global _cmds if _cmds is None: LOG.debug(_("Setting up %s commands."), CONF.baremetal.virtual_power_type) _vpc = 'nova.virt.baremetal.virtual_power_driver_settings.%s' % \ CONF.baremetal.virtual_power_type _cmds = importutils.import_class(_vpc) self._vp_cmd = _cmds() self.connection_data = _conn node = kwargs.pop('node', {}) instance = kwargs.pop('instance', {}) self._node_name = instance.get('hostname', "") context = nova_context.get_admin_context() ifs = db.bm_interface_get_all_by_bm_node_id(context, node['id']) self._mac_addresses = [_normalize_mac(i['address']) for i in ifs] self._connection = None self._matched_name = '' self.state = None def _get_conn(self): if not CONF.baremetal.virtual_power_ssh_host: raise exception.NovaException( _('virtual_power_ssh_host not defined. Can not Start')) if not CONF.baremetal.virtual_power_host_user: raise exception.NovaException( _('virtual_power_host_user not defined. Can not Start')) if not CONF.baremetal.virtual_power_host_pass: # it is ok to not have a password if you have a keyfile if CONF.baremetal.virtual_power_host_key is None: raise exception.NovaException( _('virtual_power_host_pass/key not set. 
Can not Start')) _conn = connection.Connection( CONF.baremetal.virtual_power_ssh_host, CONF.baremetal.virtual_power_host_user, CONF.baremetal.virtual_power_host_pass, CONF.baremetal.virtual_power_ssh_port, CONF.baremetal.virtual_power_host_key) return _conn def _set_connection(self): if self._connection is None: if self.connection_data is None: self.connection_data = self._get_conn() self._connection = connection.ssh_connect(self.connection_data) def _get_full_node_list(self): LOG.debug(_("Getting full node list.")) cmd = self._vp_cmd.list_cmd full_list = self._run_command(cmd) return full_list def _check_for_node(self): LOG.debug(_("Looking up name for MAC address %s."), self._mac_addresses) self._matched_name = '' full_node_list = self._get_full_node_list() for node in full_node_list: cmd = self._vp_cmd.get_node_macs.replace('{_NodeName_}', node) mac_address_list = self._run_command(cmd) for mac in mac_address_list: if _normalize_mac(mac) in self._mac_addresses: self._matched_name = ('"%s"' % node) break return self._matched_name def activate_node(self): LOG.info(_("activate_node name %s"), self._node_name) if self._check_for_node(): cmd = self._vp_cmd.start_cmd self._run_command(cmd) if self.is_power_on(): self.state = baremetal_states.ACTIVE else: self.state = baremetal_states.ERROR return self.state def reboot_node(self): LOG.info(_("reset node: %s"), self._node_name) if self._check_for_node(): cmd = self._vp_cmd.reboot_cmd self._run_command(cmd) if self.is_power_on(): self.state = baremetal_states.ACTIVE else: self.state = baremetal_states.ERROR return self.state def deactivate_node(self): LOG.info(_("deactivate_node name %s"), self._node_name) if self._check_for_node(): if self.is_power_on(): cmd = self._vp_cmd.stop_cmd self._run_command(cmd) if self.is_power_on(): self.state = baremetal_states.ERROR else: self.state = baremetal_states.DELETED return self.state def is_power_on(self): LOG.debug(_("Checking if %s is running"), self._node_name) if not self._check_for_node(): err_msg = _('Node "%(name)s" with MAC address %(mac)s not found.') LOG.error(err_msg, {'name': self._node_name, 'mac': self._mac_addresses}) # in our case the _node_name is the node_id raise exception.NodeNotFound(node_id=self._node_name) cmd = self._vp_cmd.list_running_cmd running_node_list = self._run_command(cmd) for node in running_node_list: if self._matched_name in node: return True return False def _run_command(self, cmd, check_exit_code=True): """Run a remote command using an active ssh connection. :param cmd: String with the command to run. If {_NodeName_} is in the command it will get replaced by the _matched_name value. base_cmd will also get prepended to the command. """ self._set_connection() cmd = cmd.replace('{_NodeName_}', self._matched_name) cmd = '%s %s' % (self._vp_cmd.base_cmd, cmd) try: stdout, stderr = processutils.ssh_execute( self._connection, cmd, check_exit_code=check_exit_code) result = stdout.strip().splitlines() LOG.debug(_('Result for run_command: %s'), result) except processutils.ProcessExecutionError: result = [] LOG.exception(_("Error running command: %s"), cmd) return result nova-2014.1.5/nova/virt/baremetal/__init__.py0000664000567000056700000000131412540642544022050 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.virt.baremetal import driver BareMetalDriver = driver.BareMetalDriver nova-2014.1.5/nova/virt/baremetal/common.py0000664000567000056700000000406512540642544021607 0ustar jenkinsjenkins00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import paramiko from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) CONNECTION_TIMEOUT = 60 class ConnectionFailed(exception.NovaException): msg_fmt = _('Connection failed') class Connection(object): def __init__(self, host, username, password, port=22, keyfile=None): self.host = host self.username = username self.password = password self.port = port self.keyfile = keyfile def ssh_connect(connection): """Method to connect to remote system using ssh protocol. :param connection: a Connection object. :returns: paramiko.SSHClient -- an active ssh connection. :raises: ConnectionFailed """ try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(connection.host, username=connection.username, password=connection.password, port=connection.port, key_filename=connection.keyfile, timeout=CONNECTION_TIMEOUT) LOG.debug("SSH connection with %s established successfully." % connection.host) # send TCP keepalive packets every 20 seconds ssh.get_transport().set_keepalive(20) return ssh except Exception: LOG.exception(_('Connection error')) raise ConnectionFailed() nova-2014.1.5/nova/virt/baremetal/virtual_power_driver_settings.py0000664000567000056700000000367312540642544026520 0ustar jenkinsjenkins00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
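# Connection and ssh_connect in common.py above are the transport used by
# the virtual power driver. A hedged usage sketch: the host and credentials
# below are placeholders, and a reachable SSH server is needed for this to
# run end to end.
from nova.virt.baremetal import common

conn_data = common.Connection(host='192.0.2.10',  # placeholder (TEST-NET)
                              username='stack',   # placeholder user
                              password='secret',
                              port=22)
try:
    ssh = common.ssh_connect(conn_data)
    # ssh is a live paramiko.SSHClient, so exec_command is available
    stdin, stdout, stderr = ssh.exec_command('uname -a')
    print(stdout.read())
    ssh.close()
except common.ConnectionFailed:
    print('could not reach the virtual power host')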
# # Virtual power driver commands class vbox(object): """set commands for basic Virtual Box control.""" def __init__(self): self.base_cmd = '/usr/bin/VBoxManage' self.start_cmd = 'startvm {_NodeName_}' self.stop_cmd = 'controlvm {_NodeName_} poweroff' self.reboot_cmd = 'controlvm {_NodeName_} reset' self.list_cmd = "list vms|awk -F'\"' '{print $2}'" self.list_running_cmd = 'list runningvms' self.get_node_macs = ("showvminfo --machinereadable {_NodeName_} | " "grep " '"macaddress" | awk -F ' "'" '"' "' '{print $2}'") class virsh(object): """set commands for basic Virsh control.""" def __init__(self): self.base_cmd = '/usr/bin/virsh' self.start_cmd = 'start {_NodeName_}' self.stop_cmd = 'destroy {_NodeName_}' self.reboot_cmd = 'reset {_NodeName_}' self.list_cmd = "list --all | tail -n +2 | awk -F\" \" '{print $2}'" self.list_running_cmd = \ "list --all|grep running|awk -v qc='\"' -F\" \" '{print qc$2qc}'" self.get_node_macs = ("dumpxml {_NodeName_} | grep " '"mac address" | awk -F' '"' "'" '" ' "'{print $2}' | tr -d ':'") nova-2014.1.5/nova/virt/baremetal/doc/0000775000567000056700000000000012540643452020504 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/virt/baremetal/doc/README.rst0000664000567000056700000000354312540642544022201 0ustar jenkinsjenkins00000000000000General Bare-metal Provisioning README ====================================== :Authors: [USC/ISI] Mikyung Kang , David Kang [NTT DOCOMO] Ken Igarashi [VirtualTech Japan Inc.] Arata Notsu :Date: 2012-08-02 :Version: 2012.8 :Wiki: http://wiki.openstack.org/GeneralBareMetalProvisioningFramework Code changes ------------ :: nova/nova/virt/baremetal/* nova/nova/virt/driver.py nova/nova/tests/baremetal/* nova/nova/tests/compute/test_compute.py nova/nova/compute/manager.py nova/nova/compute/resource_tracker.py nova/nova/manager.py nova/nova/scheduler/driver.py nova/nova/scheduler/filter_scheduler.py nova/nova/scheduler/host_manager.py nova/nova/scheduler/baremetal_host_manager.py nova/bin/bm_deploy_server nova/bin/nova-bm-manage Additional setting for bare-metal provisioning [nova.conf] ---------------------------------------------------------- :: # baremetal database connection baremetal_sql_connection = mysql://$ID:$Password@$IP/nova_bm # baremetal compute driver compute_driver = nova.virt.baremetal.driver.BareMetalDriver baremetal_driver = {nova.virt.baremetal.tilera.Tilera | nova.virt.baremetal.pxe.PXE} power_manager = {nova.virt.baremetal.tilera_pdu.Pdu | nova.virt.baremetal.ipmi.Ipmi} # flavor_extra_specs this baremetal compute flavor_extra_specs = cpu_arch:{tilepro64 | x86_64 | arm} # TFTP root baremetal_tftp_root = /tftpboot # baremetal scheduler host manager scheduler_host_manager = nova.scheduler.baremetal_host_manager.BaremetalHostManager Non-PXE (Tilera) Bare-metal Provisioning ---------------------------------------- 1. tilera-bm-instance-creation.rst 2. tilera-bm-installation.rst PXE Bare-metal Provisioning --------------------------- 1. pxe-bm-instance-creation.rst 2. pxe-bm-installation.rst nova-2014.1.5/nova/virt/baremetal/vif_driver.py0000664000567000056700000000562412540642544022460 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova import context from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.baremetal import db as bmdb CONF = cfg.CONF LOG = logging.getLogger(__name__) class BareMetalVIFDriver(object): def _after_plug(self, instance, vif, pif): pass def _after_unplug(self, instance, vif, pif): pass def plug(self, instance, vif): LOG.debug(_("plug: instance_uuid=%(uuid)s vif=%(vif)s") % {'uuid': instance['uuid'], 'vif': vif}) vif_uuid = vif['id'] ctx = context.get_admin_context() node = bmdb.bm_node_get_by_instance_uuid(ctx, instance['uuid']) # TODO(deva): optimize this database query # this is just searching for a free physical interface pifs = bmdb.bm_interface_get_all_by_bm_node_id(ctx, node['id']) for pif in pifs: if not pif['vif_uuid']: bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid) LOG.debug(_("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)") % {'id': pif['id'], 'vif_uuid': vif_uuid}) self._after_plug(instance, vif, pif) return # NOTE(deva): should this really be raising an exception # when there are no physical interfaces left? raise exception.NovaException(_( "Baremetal node: %(id)s has no available physical interface" " for virtual interface %(vif_uuid)s") % {'id': node['id'], 'vif_uuid': vif_uuid}) def unplug(self, instance, vif): LOG.debug(_("unplug: instance_uuid=%(uuid)s vif=%(vif)s"), {'uuid': instance['uuid'], 'vif': vif}) vif_uuid = vif['id'] ctx = context.get_admin_context() try: pif = bmdb.bm_interface_get_by_vif_uuid(ctx, vif_uuid) bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], None) LOG.debug(_("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)") % {'id': pif['id'], 'vif_uuid': vif_uuid}) self._after_unplug(instance, vif, pif) except exception.NovaException: LOG.warn(_("no pif for vif_uuid=%s") % vif_uuid) nova-2014.1.5/nova/virt/baremetal/net-static.ubuntu.template0000664000567000056700000000124512540642544025073 0ustar jenkinsjenkins00000000000000# Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback {% for ifc in interfaces -%} auto {{ ifc.name }} iface {{ ifc.name }} inet static address {{ ifc.address }} netmask {{ ifc.netmask }} gateway {{ ifc.gateway }} {%- if ifc.dns %} dns-nameservers {{ ifc.dns }} {%- endif %} {% if use_ipv6 -%} iface {{ ifc.name }} inet6 static address {{ ifc.address_v6 }} netmask {{ ifc.netmask_v6 }} gateway {{ ifc.gateway_v6 }} {%- endif %} {%- endfor %} nova-2014.1.5/nova/virt/baremetal/tilera.py0000664000567000056700000003153612540642544021602 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2013 University of Southern California / ISI # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Class for Tilera bare-metal nodes. """ import base64 import os import jinja2 from oslo.config import cfg from nova.compute import flavors from nova import exception from nova.openstack.common.db import exception as db_exc from nova.openstack.common import fileutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import base from nova.virt.baremetal import db from nova.virt.baremetal import utils as bm_utils LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('use_ipv6', 'nova.netconf') CONF.import_opt('net_config_template', 'nova.virt.baremetal.pxe', group='baremetal') def build_network_config(network_info): interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6) tmpl_path, tmpl_file = os.path.split(CONF.baremetal.net_config_template) env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path)) template = env.get_template(tmpl_file) return template.render({'interfaces': interfaces, 'use_ipv6': CONF.use_ipv6}) def get_image_dir_path(instance): """Generate the dir for an instances disk.""" return os.path.join(CONF.instances_path, instance['name']) def get_image_file_path(instance): """Generate the full path for an instances disk.""" return os.path.join(CONF.instances_path, instance['name'], 'disk') def get_tilera_nfs_path(node_id): """Generate the path for an instances Tilera nfs.""" tilera_nfs_dir = "fs_" + str(node_id) return os.path.join(CONF.baremetal.tftp_root, tilera_nfs_dir) def get_partition_sizes(instance): flavor = flavors.extract_flavor(instance) root_mb = flavor['root_gb'] * 1024 swap_mb = flavor['swap'] if swap_mb < 1: swap_mb = 1 return (root_mb, swap_mb) def get_tftp_image_info(instance): """Generate the paths for tftp files for this instance. Raises NovaException if - instance does not contain kernel_id """ image_info = { 'kernel': [None, None], } try: image_info['kernel'][0] = str(instance['kernel_id']) except KeyError: pass missing_labels = [] for label in image_info.keys(): (uuid, path) = image_info[label] if not uuid: missing_labels.append(label) else: image_info[label][1] = os.path.join(CONF.baremetal.tftp_root, instance['uuid'], label) if missing_labels: raise exception.NovaException(_( "Can not activate Tilera bootloader. 
" "The following boot parameters " "were not passed to baremetal driver: %s") % missing_labels) return image_info class Tilera(base.NodeDriver): """Tilera bare metal driver.""" def __init__(self, virtapi): super(Tilera, self).__init__(virtapi) def _collect_mac_addresses(self, context, node): macs = set() for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']): if nic['address']: macs.add(nic['address']) return sorted(macs) def _cache_tftp_images(self, context, instance, image_info): """Fetch the necessary kernels and ramdisks for the instance.""" fileutils.ensure_tree( os.path.join(CONF.baremetal.tftp_root, instance['uuid'])) LOG.debug(_("Fetching kernel and ramdisk for instance %s") % instance['name']) for label in image_info.keys(): (uuid, path) = image_info[label] bm_utils.cache_image( context=context, target=path, image_id=uuid, user_id=instance['user_id'], project_id=instance['project_id'], ) def _cache_image(self, context, instance, image_meta): """Fetch the instance's image from Glance This method pulls the relevant AMI and associated kernel and ramdisk, and the deploy kernel and ramdisk from Glance, and writes them to the appropriate places on local disk. Both sets of kernel and ramdisk are needed for Tilera booting, so these are stored under CONF.baremetal.tftp_root. At present, the AMI is cached and certain files are injected. Debian/ubuntu-specific assumptions are made regarding the injected files. In a future revision, this functionality will be replaced by a more scalable and os-agnostic approach: the deployment ramdisk will fetch from Glance directly, and write its own last-mile configuration. """ fileutils.ensure_tree(get_image_dir_path(instance)) image_path = get_image_file_path(instance) LOG.debug(_("Fetching image %(ami)s for instance %(name)s") % {'ami': image_meta['id'], 'name': instance['name']}) bm_utils.cache_image(context=context, target=image_path, image_id=image_meta['id'], user_id=instance['user_id'], project_id=instance['project_id'], clean=True, ) return [image_meta['id'], image_path] def _inject_into_image(self, context, node, instance, network_info, injected_files=None, admin_password=None): """Inject last-mile configuration into instances image Much of this method is a hack around DHCP and cloud-init not working together with baremetal provisioning yet. 
""" partition = None if not instance['kernel_id']: partition = "1" ssh_key = None if 'key_data' in instance and instance['key_data']: ssh_key = str(instance['key_data']) if injected_files is None: injected_files = [] else: injected_files = list(injected_files) net_config = build_network_config(network_info) if instance['hostname']: injected_files.append(('/etc/hostname', instance['hostname'])) LOG.debug(_("Injecting files into image for instance %(name)s") % {'name': instance['name']}) bm_utils.inject_into_image( image=get_image_file_path(instance), key=ssh_key, net=net_config, metadata=utils.instance_meta(instance), admin_password=admin_password, files=injected_files, partition=partition, ) def cache_images(self, context, node, instance, admin_password, image_meta, injected_files, network_info): """Prepare all the images for this instance.""" tftp_image_info = get_tftp_image_info(instance) self._cache_tftp_images(context, instance, tftp_image_info) self._cache_image(context, instance, image_meta) self._inject_into_image(context, node, instance, network_info, injected_files, admin_password) def destroy_images(self, context, node, instance): """Delete instance's image file.""" bm_utils.unlink_without_raise(get_image_file_path(instance)) bm_utils.rmtree_without_raise(get_image_dir_path(instance)) def activate_bootloader(self, context, node, instance, network_info): """Configure Tilera boot loader for an instance Kernel and ramdisk images are downloaded by cache_tftp_images, and stored in /tftpboot/{uuid}/ This method writes the instances config file, and then creates symlinks for each MAC address in the instance. By default, the complete layout looks like this: /tftpboot/ ./{uuid}/ kernel ./fs_node_id/ """ image_info = get_tftp_image_info(instance) (root_mb, swap_mb) = get_partition_sizes(instance) tilera_nfs_path = get_tilera_nfs_path(node['id']) image_file_path = get_image_file_path(instance) deployment_key = bm_utils.random_alnum(32) db.bm_node_update(context, node['id'], {'deploy_key': deployment_key, 'image_path': image_file_path, 'pxe_config_path': tilera_nfs_path, 'root_mb': root_mb, 'swap_mb': swap_mb}) if os.path.exists(image_file_path) and \ os.path.exists(tilera_nfs_path): utils.execute('mount', '-o', 'loop', image_file_path, tilera_nfs_path, run_as_root=True) def deactivate_bootloader(self, context, node, instance): """Delete Tilera bootloader images and config.""" try: db.bm_node_update(context, node['id'], {'deploy_key': None, 'image_path': None, 'pxe_config_path': None, 'root_mb': 0, 'swap_mb': 0}) except exception.NodeNotFound: pass tilera_nfs_path = get_tilera_nfs_path(node['id']) if os.path.ismount(tilera_nfs_path): utils.execute('rpc.mountd', run_as_root=True) utils.execute('umount', '-f', tilera_nfs_path, run_as_root=True) try: image_info = get_tftp_image_info(instance) except exception.NovaException: pass else: for label in image_info.keys(): (uuid, path) = image_info[label] bm_utils.unlink_without_raise(path) try: macs = self._collect_mac_addresses(context, node) except db_exc.DBError: pass if os.path.exists(os.path.join(CONF.baremetal.tftp_root, instance['uuid'])): bm_utils.rmtree_without_raise( os.path.join(CONF.baremetal.tftp_root, instance['uuid'])) def _iptables_set(self, node_ip, user_data): """Sets security setting (iptables:port) if needed. iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP /tftpboot/iptables_rule script sets iptables rule on the given node. 
""" rule_path = CONF.baremetal.tftp_root + "/iptables_rule" if user_data is not None: open_ip = base64.b64decode(user_data) utils.execute(rule_path, node_ip, open_ip) def activate_node(self, context, node, instance): """Wait for Tilera deployment to complete.""" locals = {'error': '', 'started': False} try: row = db.bm_node_get(context, node['id']) if instance['uuid'] != row.get('instance_uuid'): locals['error'] = _("Node associated with another instance" " while waiting for deploy of %s") status = row.get('task_state') if (status == baremetal_states.DEPLOYING and locals['started'] == False): LOG.info(_('Tilera deploy started for instance %s') % instance['uuid']) locals['started'] = True elif status in (baremetal_states.DEPLOYDONE, baremetal_states.BUILDING, baremetal_states.ACTIVE): LOG.info(_("Tilera deploy completed for instance %s") % instance['uuid']) node_ip = node['pm_address'] user_data = instance['user_data'] try: self._iptables_set(node_ip, user_data) except Exception: self.deactivate_bootloader(context, node, instance) raise exception.NovaException(_("Node is " "unknown error state.")) elif status == baremetal_states.DEPLOYFAIL: locals['error'] = _("Tilera deploy failed for instance %s") except exception.NodeNotFound: locals['error'] = _("Baremetal node deleted while waiting " "for deployment of instance %s") if locals['error']: raise exception.InstanceDeployFailure( locals['error'] % instance['uuid']) def deactivate_node(self, context, node, instance): pass nova-2014.1.5/nova/virt/baremetal/pxe.py0000664000567000056700000004662412540642544021122 0ustar jenkinsjenkins00000000000000# Copyright 2012,2014 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Class for PXE bare-metal nodes. 
""" import datetime import os import jinja2 from oslo.config import cfg from nova.compute import flavors from nova import exception from nova.objects import flavor as flavor_obj from nova.openstack.common.db import exception as db_exc from nova.openstack.common import fileutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.openstack.common import timeutils from nova import utils from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import base from nova.virt.baremetal import db from nova.virt.baremetal import utils as bm_utils pxe_opts = [ cfg.StrOpt('deploy_kernel', help='Default kernel image ID used in deployment phase'), cfg.StrOpt('deploy_ramdisk', help='Default ramdisk image ID used in deployment phase'), cfg.StrOpt('net_config_template', default='$pybasedir/nova/virt/baremetal/' 'net-dhcp.ubuntu.template', help='Template file for injected network config'), cfg.StrOpt('pxe_append_params', default='nofb nomodeset vga=normal', help='Additional append parameters for baremetal PXE boot'), cfg.StrOpt('pxe_config_template', default='$pybasedir/nova/virt/baremetal/pxe_config.template', help='Template file for PXE configuration'), cfg.BoolOpt('use_file_injection', help='If True, enable file injection for network info, ' 'files and admin password', default=False), cfg.IntOpt('pxe_deploy_timeout', help='Timeout for PXE deployments. Default: 0 (unlimited)', default=0), cfg.BoolOpt('pxe_network_config', help='If set, pass the network configuration details to the ' 'initramfs via cmdline.', default=False), cfg.StrOpt('pxe_bootfile_name', help='This gets passed to Neutron as the bootfile dhcp ' 'parameter.', default='pxelinux.0'), ] LOG = logging.getLogger(__name__) baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(pxe_opts, baremetal_group) CONF.import_opt('use_ipv6', 'nova.netconf') def build_pxe_network_config(network_info): interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6) template = None if not CONF.use_ipv6: template = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off" else: template = ("ip=[%(address_v6)s]::[%(gateway_v6)s]:" "[%(netmask_v6)s]::%(name)s:off") net_config = [template % iface for iface in interfaces] return ' '.join(net_config) def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn, deployment_aki_path, deployment_ari_path, aki_path, ari_path, network_info): """Build the PXE config file for a node This method builds the PXE boot configuration file for a node, given all the required parameters. The resulting file has both a "deploy" and "boot" label, which correspond to the two phases of booting. This may be extended later. 
""" LOG.debug(_("Building PXE config for deployment %s.") % deployment_id) network_config = None if network_info and CONF.baremetal.pxe_network_config: network_config = build_pxe_network_config(network_info) pxe_options = { 'deployment_id': deployment_id, 'deployment_key': deployment_key, 'deployment_iscsi_iqn': deployment_iscsi_iqn, 'deployment_aki_path': deployment_aki_path, 'deployment_ari_path': deployment_ari_path, 'aki_path': aki_path, 'ari_path': ari_path, 'pxe_append_params': CONF.baremetal.pxe_append_params, 'pxe_network_config': network_config, } tmpl_path, tmpl_file = os.path.split(CONF.baremetal.pxe_config_template) env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path)) template = env.get_template(tmpl_file) return template.render({'pxe_options': pxe_options, 'ROOT': '${ROOT}'}) def build_network_config(network_info): interfaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6) tmpl_path, tmpl_file = os.path.split(CONF.baremetal.net_config_template) env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path)) template = env.get_template(tmpl_file) return template.render({'interfaces': interfaces, 'use_ipv6': CONF.use_ipv6}) def get_deploy_aki_id(flavor): return flavor.get('extra_specs', {}).\ get('baremetal:deploy_kernel_id', CONF.baremetal.deploy_kernel) def get_deploy_ari_id(flavor): return flavor.get('extra_specs', {}).\ get('baremetal:deploy_ramdisk_id', CONF.baremetal.deploy_ramdisk) def get_image_dir_path(instance): """Generate the dir for an instances disk.""" return os.path.join(CONF.instances_path, instance['name']) def get_image_file_path(instance): """Generate the full path for an instances disk.""" return os.path.join(CONF.instances_path, instance['name'], 'disk') def get_pxe_config_file_path(instance): """Generate the path for an instances PXE config file.""" return os.path.join(CONF.baremetal.tftp_root, instance['uuid'], 'config') def get_partition_sizes(instance): flavor = flavors.extract_flavor(instance) root_mb = flavor['root_gb'] * 1024 swap_mb = flavor['swap'] ephemeral_mb = flavor['ephemeral_gb'] * 1024 # NOTE(deva): For simpler code paths on the deployment side, # we always create a swap partition. If the flavor # does not specify any swap, we default to 1MB if swap_mb < 1: swap_mb = 1 return (root_mb, swap_mb, ephemeral_mb) def get_pxe_mac_path(mac): """Convert a MAC address into a PXE config file name.""" return os.path.join( CONF.baremetal.tftp_root, 'pxelinux.cfg', "01-" + mac.replace(":", "-").lower() ) def get_tftp_image_info(instance, flavor): """Generate the paths for tftp files for this instance Raises NovaException if - instance does not contain kernel_id or ramdisk_id - deploy_kernel_id or deploy_ramdisk_id can not be read from flavor['extra_specs'] and defaults are not set """ image_info = { 'kernel': [None, None], 'ramdisk': [None, None], 'deploy_kernel': [None, None], 'deploy_ramdisk': [None, None], } try: image_info['kernel'][0] = str(instance['kernel_id']) image_info['ramdisk'][0] = str(instance['ramdisk_id']) image_info['deploy_kernel'][0] = get_deploy_aki_id(flavor) image_info['deploy_ramdisk'][0] = get_deploy_ari_id(flavor) except KeyError: pass missing_labels = [] for label in image_info.keys(): (uuid, path) = image_info[label] if not uuid: missing_labels.append(label) else: image_info[label][1] = os.path.join(CONF.baremetal.tftp_root, instance['uuid'], label) if missing_labels: raise exception.NovaException(_( "Can not activate PXE bootloader. 
The following boot parameters " "were not passed to baremetal driver: %s") % missing_labels) return image_info class PXE(base.NodeDriver): """PXE bare metal driver.""" def __init__(self, virtapi): super(PXE, self).__init__(virtapi) def _collect_mac_addresses(self, context, node): macs = set() for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']): if nic['address']: macs.add(nic['address']) return sorted(macs) def _cache_tftp_images(self, context, instance, image_info): """Fetch the necessary kernels and ramdisks for the instance.""" fileutils.ensure_tree( os.path.join(CONF.baremetal.tftp_root, instance['uuid'])) LOG.debug(_("Fetching kernel and ramdisk for instance %s") % instance['name']) for label in image_info.keys(): (uuid, path) = image_info[label] bm_utils.cache_image( context=context, target=path, image_id=uuid, user_id=instance['user_id'], project_id=instance['project_id'], ) def _cache_image(self, context, instance, image_meta): """Fetch the instance's image from Glance This method pulls the relevant AMI and associated kernel and ramdisk, and the deploy kernel and ramdisk from Glance, and writes them to the appropriate places on local disk. Both sets of kernel and ramdisk are needed for PXE booting, so these are stored under CONF.baremetal.tftp_root. At present, the AMI is cached and certain files are injected. Debian/ubuntu-specific assumptions are made regarding the injected files. In a future revision, this functionality will be replaced by a more scalable and os-agnostic approach: the deployment ramdisk will fetch from Glance directly, and write its own last-mile configuration. """ fileutils.ensure_tree(get_image_dir_path(instance)) image_path = get_image_file_path(instance) LOG.debug(_("Fetching image %(ami)s for instance %(name)s") % {'ami': image_meta['id'], 'name': instance['name']}) bm_utils.cache_image(context=context, target=image_path, image_id=image_meta['id'], user_id=instance['user_id'], project_id=instance['project_id'], clean=True, ) return [image_meta['id'], image_path] def _inject_into_image(self, context, node, instance, network_info, injected_files=None, admin_password=None): """Inject last-mile configuration into instances image Much of this method is a hack around DHCP and cloud-init not working together with baremetal provisioning yet. 
""" # NOTE(deva): We assume that if we're not using a kernel, # then the target partition is the first partition partition = None if not instance['kernel_id']: partition = "1" ssh_key = None if 'key_data' in instance and instance['key_data']: ssh_key = str(instance['key_data']) if injected_files is None: injected_files = [] else: # NOTE(deva): copy so we dont modify the original injected_files = list(injected_files) net_config = build_network_config(network_info) if instance['hostname']: injected_files.append(('/etc/hostname', instance['hostname'])) LOG.debug(_("Injecting files into image for instance %(name)s") % {'name': instance['name']}) bm_utils.inject_into_image( image=get_image_file_path(instance), key=ssh_key, net=net_config, metadata=utils.instance_meta(instance), admin_password=admin_password, files=injected_files, partition=partition, ) def cache_images(self, context, node, instance, admin_password, image_meta, injected_files, network_info): """Prepare all the images for this instance.""" flavor = flavor_obj.Flavor.get_by_id(context, instance['instance_type_id']) tftp_image_info = get_tftp_image_info(instance, flavor) self._cache_tftp_images(context, instance, tftp_image_info) self._cache_image(context, instance, image_meta) if CONF.baremetal.use_file_injection: self._inject_into_image(context, node, instance, network_info, injected_files, admin_password) def destroy_images(self, context, node, instance): """Delete instance's image file.""" bm_utils.unlink_without_raise(get_image_file_path(instance)) bm_utils.rmtree_without_raise(get_image_dir_path(instance)) def dhcp_options_for_instance(self, instance): return [{'opt_name': 'bootfile-name', 'opt_value': CONF.baremetal.pxe_bootfile_name}, {'opt_name': 'server-ip-address', 'opt_value': CONF.my_ip}, {'opt_name': 'tftp-server', 'opt_value': CONF.my_ip} ] def activate_bootloader(self, context, node, instance, network_info): """Configure PXE boot loader for an instance Kernel and ramdisk images are downloaded by cache_tftp_images, and stored in /tftpboot/{uuid}/ This method writes the instances config file, and then creates symlinks for each MAC address in the instance. 
By default, the complete layout looks like this: /tftpboot/ ./{uuid}/ kernel ramdisk deploy_kernel deploy_ramdisk config ./pxelinux.cfg/ {mac} -> ../{uuid}/config """ flavor = flavor_obj.Flavor.get_by_id(context, instance['instance_type_id']) image_info = get_tftp_image_info(instance, flavor) (root_mb, swap_mb, ephemeral_mb) = get_partition_sizes(instance) pxe_config_file_path = get_pxe_config_file_path(instance) image_file_path = get_image_file_path(instance) deployment_key = bm_utils.random_alnum(32) deployment_iscsi_iqn = "iqn-%s" % instance['uuid'] db.bm_node_update(context, node['id'], {'deploy_key': deployment_key, 'image_path': image_file_path, 'pxe_config_path': pxe_config_file_path, 'root_mb': root_mb, 'swap_mb': swap_mb, 'ephemeral_mb': ephemeral_mb}) pxe_config = build_pxe_config( node['id'], deployment_key, deployment_iscsi_iqn, image_info['deploy_kernel'][1], image_info['deploy_ramdisk'][1], image_info['kernel'][1], image_info['ramdisk'][1], network_info, ) bm_utils.write_to_file(pxe_config_file_path, pxe_config) macs = self._collect_mac_addresses(context, node) for mac in macs: mac_path = get_pxe_mac_path(mac) bm_utils.unlink_without_raise(mac_path) bm_utils.create_link_without_raise(pxe_config_file_path, mac_path) def deactivate_bootloader(self, context, node, instance): """Delete PXE bootloader images and config.""" try: db.bm_node_update(context, node['id'], {'deploy_key': None, 'image_path': None, 'pxe_config_path': None, 'root_mb': 0, 'swap_mb': 0}) except exception.NodeNotFound: pass # NOTE(danms): the flavor extra_specs do not need to be # present/correct at deactivate time, so pass something empty # to avoid an extra lookup flavor = dict(extra_specs={ 'baremetal:deploy_ramdisk_id': 'ignore', 'baremetal:deploy_kernel_id': 'ignore'}) try: image_info = get_tftp_image_info(instance, flavor) except exception.NovaException: pass else: for label in image_info.keys(): (uuid, path) = image_info[label] bm_utils.unlink_without_raise(path) bm_utils.unlink_without_raise(get_pxe_config_file_path(instance)) try: macs = self._collect_mac_addresses(context, node) except db_exc.DBError: pass else: for mac in macs: bm_utils.unlink_without_raise(get_pxe_mac_path(mac)) bm_utils.rmtree_without_raise( os.path.join(CONF.baremetal.tftp_root, instance['uuid'])) def activate_node(self, context, node, instance): """Wait for PXE deployment to complete.""" locals = {'error': '', 'started': False} def _wait_for_deploy(): """Called at an interval until the deployment completes.""" try: row = db.bm_node_get(context, node['id']) if instance['uuid'] != row.get('instance_uuid'): locals['error'] = _("Node associated with another instance" " while waiting for deploy of %s") raise loopingcall.LoopingCallDone() status = row.get('task_state') if (status == baremetal_states.DEPLOYING and locals['started'] == False): LOG.info(_("PXE deploy started for instance %s") % instance['uuid']) locals['started'] = True elif status in (baremetal_states.DEPLOYDONE, baremetal_states.ACTIVE): LOG.info(_("PXE deploy completed for instance %s") % instance['uuid']) raise loopingcall.LoopingCallDone() elif status == baremetal_states.DEPLOYFAIL: locals['error'] = _("PXE deploy failed for instance %s") except exception.NodeNotFound: locals['error'] = _("Baremetal node deleted while waiting " "for deployment of instance %s") if (CONF.baremetal.pxe_deploy_timeout and timeutils.utcnow() > expiration): locals['error'] = _("Timeout reached while waiting for " "PXE deploy of instance %s") if locals['error']: raise 
loopingcall.LoopingCallDone() expiration = timeutils.utcnow() + datetime.timedelta( seconds=CONF.baremetal.pxe_deploy_timeout) timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy) timer.start(interval=1).wait() if locals['error']: raise exception.InstanceDeployFailure( locals['error'] % instance['uuid']) def deactivate_node(self, context, node, instance): pass nova-2014.1.5/nova/virt/baremetal/baremetal_states.py0000664000567000056700000000222412540642544023631 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Possible baremetal node states for instances. Compute instance baremetal states represent the state of an instance as it pertains to a user or administrator. When combined with task states (task_states.py), a better picture can be formed regarding the instance's health. """ NULL = None INIT = 'initializing' ACTIVE = 'active' BUILDING = 'building' DEPLOYING = 'deploying' DEPLOYFAIL = 'deploy failed' DEPLOYDONE = 'deploy complete' DELETED = 'deleted' ERROR = 'error' PREPARED = 'prepared' nova-2014.1.5/nova/virt/baremetal/ipmi.py0000664000567000056700000002502012540642544021247 0ustar jenkinsjenkins00000000000000# coding=utf-8 # Copyright 2012 Hewlett-Packard Development Company, L.P. # Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Baremetal IPMI power manager. 
""" import os import stat import tempfile from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova import paths from nova import utils from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import base from nova.virt.baremetal import utils as bm_utils opts = [ cfg.StrOpt('terminal', default='shellinaboxd', help='Path to baremetal terminal program'), cfg.StrOpt('terminal_cert_dir', help='Path to baremetal terminal SSL cert(PEM)'), cfg.StrOpt('terminal_pid_dir', default=paths.state_path_def('baremetal/console'), help='Path to directory stores pidfiles of baremetal_terminal'), cfg.IntOpt('ipmi_power_retry', default=10, help='Maximal number of retries for IPMI operations'), ] baremetal_group = cfg.OptGroup(name='baremetal', title='Baremetal Options') CONF = cfg.CONF CONF.register_group(baremetal_group) CONF.register_opts(opts, baremetal_group) LOG = logging.getLogger(__name__) def _make_password_file(password): fd, path = tempfile.mkstemp() os.fchmod(fd, stat.S_IRUSR | stat.S_IWUSR) with os.fdopen(fd, "w") as f: # NOTE(r-mibu): Since ipmitool hangs with an empty password file, # we have to write '\0' if password was empty. # see https://bugs.launchpad.net/nova/+bug/1237802 for more details f.write(password or b"\0") return path def _get_console_pid_path(node_id): name = "%s.pid" % node_id path = os.path.join(CONF.baremetal.terminal_pid_dir, name) return path def _get_console_pid(node_id): pid_path = _get_console_pid_path(node_id) if os.path.exists(pid_path): with open(pid_path, 'r') as f: pid_str = f.read() try: return int(pid_str) except ValueError: LOG.warn(_("pid file %s does not contain any pid"), pid_path) return None class IPMI(base.PowerManager): """IPMI Power Driver for Baremetal Nova Compute This PowerManager class provides mechanism for controlling the power state of physical hardware via IPMI calls. It also provides serial console access where available. 
""" def __init__(self, node, **kwargs): self.state = None self.retries = None self.node_id = node['id'] self.address = node['pm_address'] self.user = node['pm_user'] self.password = node['pm_password'] self.port = node['terminal_port'] if self.node_id == None: raise exception.InvalidParameterValue(_("Node id not supplied " "to IPMI")) if self.address == None: raise exception.InvalidParameterValue(_("Address not supplied " "to IPMI")) if self.user == None: raise exception.InvalidParameterValue(_("User not supplied " "to IPMI")) if self.password == None: raise exception.InvalidParameterValue(_("Password not supplied " "to IPMI")) def _exec_ipmitool(self, command): args = ['ipmitool', '-I', 'lanplus', '-H', self.address, '-U', self.user, '-f'] pwfile = _make_password_file(self.password) try: args.append(pwfile) args.extend(command.split(" ")) out, err = utils.execute(*args, attempts=3) LOG.debug(_("ipmitool stdout: '%(out)s', stderr: '%(err)s'"), {'out': out, 'err': err}) return out, err finally: bm_utils.unlink_without_raise(pwfile) def _power_on(self): """Turn the power to this node ON.""" def _wait_for_power_on(): """Called at an interval until the node's power is on.""" if self.is_power_on(): self.state = baremetal_states.ACTIVE raise loopingcall.LoopingCallDone() if self.retries > CONF.baremetal.ipmi_power_retry: LOG.error(_("IPMI power on failed after %d tries") % ( CONF.baremetal.ipmi_power_retry)) self.state = baremetal_states.ERROR raise loopingcall.LoopingCallDone() try: self.retries += 1 if not self.power_on_called: self._exec_ipmitool("power on") self.power_on_called = True except Exception: LOG.exception(_("IPMI power on failed")) self.retries = 0 self.power_on_called = False timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_on) timer.start(interval=1.0).wait() def _power_off(self): """Turn the power to this node OFF.""" def _wait_for_power_off(): """Called at an interval until the node's power is off.""" if self.is_power_on() is False: self.state = baremetal_states.DELETED raise loopingcall.LoopingCallDone() if self.retries > CONF.baremetal.ipmi_power_retry: LOG.error(_("IPMI power off failed after %d tries") % ( CONF.baremetal.ipmi_power_retry)) self.state = baremetal_states.ERROR raise loopingcall.LoopingCallDone() try: self.retries += 1 if not self.power_off_called: self._exec_ipmitool("power off") self.power_off_called = True except Exception: LOG.exception(_("IPMI power off failed")) self.retries = 0 self.power_off_called = False timer = loopingcall.FixedIntervalLoopingCall(_wait_for_power_off) timer.start(interval=1.0).wait() def _set_pxe_for_next_boot(self): try: self._exec_ipmitool("chassis bootdev pxe options=persistent") except Exception: LOG.exception(_("IPMI set next bootdev failed")) def activate_node(self): """Turns the power to node ON. Sets node next-boot to PXE and turns the power on, waiting up to ipmi_power_retry/2 seconds for confirmation that the power is on. :returns: One of baremetal_states.py, representing the new state. """ if self.is_power_on() and self.state == baremetal_states.ACTIVE: LOG.warning(_("Activate node called, but node %s " "is already active") % self.address) self._set_pxe_for_next_boot() self._power_on() return self.state def reboot_node(self): """Cycles the power to a node. Turns the power off, sets next-boot to PXE, and turns the power on. Each action waits up to ipmi_power_retry/2 seconds for confirmation that the power state has changed. :returns: One of baremetal_states.py, representing the new state. 
""" self._power_off() self._set_pxe_for_next_boot() self._power_on() return self.state def deactivate_node(self): """Turns the power to node OFF. Turns the power off, and waits up to ipmi_power_retry/2 seconds for confirmation that the power is off. :returns: One of baremetal_states.py, representing the new state. """ self._power_off() return self.state def is_power_on(self): """Check if the power is currently on. :returns: True if on; False if off; None if unable to determine. """ # NOTE(deva): string matching based on # http://ipmitool.cvs.sourceforge.net/ # viewvc/ipmitool/ipmitool/lib/ipmi_chassis.c res = self._exec_ipmitool("power status")[0] if res == ("Chassis Power is on\n"): return True elif res == ("Chassis Power is off\n"): return False return None def start_console(self): if not self.port: return args = [] args.append(CONF.baremetal.terminal) if CONF.baremetal.terminal_cert_dir: args.append("-c") args.append(CONF.baremetal.terminal_cert_dir) else: args.append("-t") args.append("-p") args.append(str(self.port)) args.append("--background=%s" % _get_console_pid_path(self.node_id)) args.append("-s") try: pwfile = _make_password_file(self.password) ipmi_args = "/:%(uid)s:%(gid)s:HOME:ipmitool -H %(address)s" \ " -I lanplus -U %(user)s -f %(pwfile)s sol activate" \ % {'uid': os.getuid(), 'gid': os.getgid(), 'address': self.address, 'user': self.user, 'pwfile': pwfile, } args.append(ipmi_args) # Run shellinaboxd without pipes. Otherwise utils.execute() waits # infinitely since shellinaboxd does not close passed fds. x = ["'" + arg.replace("'", "'\\''") + "'" for arg in args] x.append('/dev/null') x.append('2>&1') utils.execute(' '.join(x), shell=True) finally: bm_utils.unlink_without_raise(pwfile) def stop_console(self): console_pid = _get_console_pid(self.node_id) if console_pid: # Allow exitcode 99 (RC_UNAUTHORIZED) utils.execute('kill', '-TERM', str(console_pid), run_as_root=True, check_exit_code=[0, 99]) bm_utils.unlink_without_raise(_get_console_pid_path(self.node_id)) nova-2014.1.5/nova/virt/baremetal/utils.py0000664000567000056700000001043312540642544021453 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import errno import os import shutil from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.virt.disk import api as disk_api from nova.virt.libvirt import utils as libvirt_utils LOG = logging.getLogger(__name__) def cache_image(context, target, image_id, user_id, project_id, clean=False): if clean and os.path.exists(target): os.unlink(target) if not os.path.exists(target): libvirt_utils.fetch_image(context, target, image_id, user_id, project_id) def inject_into_image(image, key, net, metadata, admin_password, files, partition, use_cow=False): try: disk_api.inject_data(image, key, net, metadata, admin_password, files, partition, use_cow) except Exception as e: LOG.warn(_("Failed to inject data into image %(image)s. " "Error: %(e)s"), {'image': image, 'e': e}) def unlink_without_raise(path): try: os.unlink(path) except OSError as e: if e.errno == errno.ENOENT: return else: LOG.warn(_("Failed to unlink %(path)s, error: %(e)s"), {'path': path, 'e': e}) def rmtree_without_raise(path): try: if os.path.isdir(path): shutil.rmtree(path) except OSError as e: LOG.warn(_("Failed to remove dir %(path)s, error: %(e)s"), {'path': path, 'e': e}) def write_to_file(path, contents): with open(path, 'w') as f: f.write(contents) def create_link_without_raise(source, link): try: os.symlink(source, link) except OSError as e: if e.errno == errno.EEXIST: return else: LOG.warn(_("Failed to create symlink from %(source)s to %(link)s" ", error: %(e)s"), {'source': source, 'link': link, 'e': e}) def random_alnum(count): import random import string chars = string.ascii_uppercase + string.digits return "".join(random.choice(chars) for _ in range(count)) def map_network_interfaces(network_info, use_ipv6=False): # TODO(deva): fix assumption that device names begin with "eth" # and fix assumption about ordering if not isinstance(network_info, list): network_info = [network_info] interfaces = [] for id, vif in enumerate(network_info): address_v6 = gateway_v6 = netmask_v6 = None address_v4 = gateway_v4 = netmask_v4 = dns_v4 = None if use_ipv6: subnets_v6 = [s for s in vif['network']['subnets'] if s['version'] == 6] if len(subnets_v6): address_v6 = subnets_v6[0]['ips'][0]['address'] netmask_v6 = subnets_v6[0].as_netaddr()._prefixlen gateway_v6 = subnets_v6[0]['gateway']['address'] subnets_v4 = [s for s in vif['network']['subnets'] if s['version'] == 4] if len(subnets_v4): address_v4 = subnets_v4[0]['ips'][0]['address'] netmask_v4 = subnets_v4[0].as_netaddr().netmask gateway_v4 = subnets_v4[0]['gateway']['address'] dns_v4 = ' '.join([x['address'] for x in subnets_v4[0]['dns']]) interface = { 'name': 'eth%d' % id, 'address': address_v4, 'gateway': gateway_v4, 'netmask': netmask_v4, 'dns': dns_v4, 'address_v6': address_v6, 'gateway_v6': gateway_v6, 'netmask_v6': netmask_v6, } interfaces.append(interface) return interfaces nova-2014.1.5/nova/virt/cpu.py0000664000567000056700000000611512540642544017150 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging

LOG = logging.getLogger(__name__)

virt_cpu_opts = [
    cfg.StrOpt('vcpu_pin_set',
               help='Defines which pcpus that instance vcpus can use. '
                    'For example, "4-12,^8,15"',
               deprecated_group='libvirt'),
]

CONF = cfg.CONF
CONF.register_opts(virt_cpu_opts)


def get_cpuset_ids():
    """Parse the vcpu_pin_set config.

    Returns a list of pcpu ids that can be used by instances.
    """
    if not CONF.vcpu_pin_set:
        return None

    cpuset_ids = set()
    cpuset_reject_ids = set()
    for rule in CONF.vcpu_pin_set.split(','):
        rule = rule.strip()
        # Handle multi ','
        if len(rule) < 1:
            continue
        # Note the count limit in the .split() call
        range_parts = rule.split('-', 1)
        if len(range_parts) > 1:
            # So, this was a range; start by converting the parts to ints
            try:
                start, end = [int(p.strip()) for p in range_parts]
            except ValueError:
                raise exception.Invalid(_("Invalid range expression %r")
                                        % rule)
            # Make sure it's a valid range
            if start > end:
                raise exception.Invalid(_("Invalid range expression %r")
                                        % rule)
            # Add available pcpu ids to set
            cpuset_ids |= set(range(start, end + 1))
        elif rule[0] == '^':
            # Not a range, the rule is an exclusion rule; convert to int
            try:
                cpuset_reject_ids.add(int(rule[1:].strip()))
            except ValueError:
                raise exception.Invalid(_("Invalid exclusion "
                                          "expression %r") % rule)
        else:
            # OK, a single PCPU to include; convert to int
            try:
                cpuset_ids.add(int(rule))
            except ValueError:
                raise exception.Invalid(_("Invalid inclusion "
                                          "expression %r") % rule)
    # Use sets to handle the exclusion rules for us
    cpuset_ids -= cpuset_reject_ids
    if not cpuset_ids:
        raise exception.Invalid(_("No CPUs available after parsing %r")
                                % CONF.vcpu_pin_set)
    # This will convert the set to a sorted list for us
    return sorted(cpuset_ids)
nova-2014.1.5/nova/virt/__init__.py0000664000567000056700000000115012540642544020112 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
nova-2014.1.5/nova/virt/images.py0000664000567000056700000001164712540642544017624 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Handling of VM disk images.
""" import os from oslo.config import cfg from nova import exception from nova.image import glance from nova.openstack.common import fileutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import imageutils from nova.openstack.common import log as logging from nova import utils LOG = logging.getLogger(__name__) image_opts = [ cfg.BoolOpt('force_raw_images', default=True, help='Force backing images to raw format'), ] CONF = cfg.CONF CONF.register_opts(image_opts) def qemu_img_info(path): """Return an object containing the parsed output from qemu-img info.""" # TODO(mikal): this code should not be referring to a libvirt specific # flag. if not os.path.exists(path) and CONF.libvirt.images_type != 'rbd': return imageutils.QemuImgInfo() out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info', path) return imageutils.QemuImgInfo(out) def convert_image(source, dest, out_format, run_as_root=False): """Convert image to other format.""" cmd = ('qemu-img', 'convert', '-O', out_format, source, dest) utils.execute(*cmd, run_as_root=run_as_root) def fetch(context, image_href, path, _user_id, _project_id, max_size=0): # TODO(vish): Improve context handling and add owner and auth data # when it is added to glance. Right now there is no # auth checking in glance, so we assume that access was # checked before we got here. (image_service, image_id) = glance.get_remote_image_service(context, image_href) with fileutils.remove_path_on_error(path): image_service.download(context, image_id, dst_path=path) def fetch_to_raw(context, image_href, path, user_id, project_id, max_size=0): path_tmp = "%s.part" % path fetch(context, image_href, path_tmp, user_id, project_id, max_size=max_size) with fileutils.remove_path_on_error(path_tmp): data = qemu_img_info(path_tmp) fmt = data.file_format if fmt is None: raise exception.ImageUnacceptable( reason=_("'qemu-img info' parsing failed."), image_id=image_href) backing_file = data.backing_file if backing_file is not None: raise exception.ImageUnacceptable(image_id=image_href, reason=(_("fmt=%(fmt)s backed by: %(backing_file)s") % {'fmt': fmt, 'backing_file': backing_file})) # We can't generally shrink incoming images, so disallow # images > size of the flavor we're booting. Checking here avoids # an immediate DoS where we convert large qcow images to raw # (which may compress well but not be sparse). # TODO(p-draigbrady): loop through all flavor sizes, so that # we might continue here and not discard the download. # If we did that we'd have to do the higher level size checks # irrespective of whether the base image was prepared or not. 
disk_size = data.virtual_size if max_size and max_size < disk_size: msg = _('%(base)s virtual size %(disk_size)s ' 'larger than flavor root disk size %(size)s') LOG.error(msg % {'base': path, 'disk_size': disk_size, 'size': max_size}) raise exception.FlavorDiskTooSmall() if fmt != "raw" and CONF.force_raw_images: staged = "%s.converted" % path LOG.debug("%s was %s, converting to raw" % (image_href, fmt)) with fileutils.remove_path_on_error(staged): convert_image(path_tmp, staged, 'raw') os.unlink(path_tmp) data = qemu_img_info(staged) if data.file_format != "raw": raise exception.ImageUnacceptable(image_id=image_href, reason=_("Converted to raw, but format is now %s") % data.file_format) os.rename(staged, path) else: os.rename(path_tmp, path) nova-2014.1.5/nova/virt/volumeutils.py0000664000567000056700000000227412540642544020753 0ustar jenkinsjenkins00000000000000# Copyright 2014 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Volume utilities for virt drivers. """ from nova import exception from nova import utils def get_iscsi_initiator(): """Get iscsi initiator name for this machine.""" # NOTE(vish) openiscsi stores initiator name in a file that # needs root permission to read. try: contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi') except exception.FileNotFound: return None for l in contents.split('\n'): if l.startswith('InitiatorName='): return l[l.index('=') + 1:].strip() nova-2014.1.5/nova/virt/firewall.py0000664000567000056700000005340512540642544020172 0ustar jenkinsjenkins00000000000000# Copyright 2011 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2011 Citrix Systems, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
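# NOTE(editor): illustrative sketch only, not part of the original module.
# get_iscsi_initiator() in volumeutils.py above scans
# /etc/iscsi/initiatorname.iscsi for an 'InitiatorName=<iqn>' line and
# returns everything after the '='. The hypothetical helper below shows the
# same parse against an in-memory string instead of the root-only file.
def _example_parse_initiator(contents):
    """Return the initiator name found in contents, or None (sketch)."""
    for line in contents.split('\n'):
        if line.startswith('InitiatorName='):
            return line[line.index('=') + 1:].strip()
    return None
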
from oslo.config import cfg

from nova.compute import utils as compute_utils
from nova import context
from nova.network import linux_net
from nova.objects import instance as instance_obj
from nova.objects import security_group as security_group_obj
from nova.objects import security_group_rule as security_group_rule_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import netutils


LOG = logging.getLogger(__name__)

firewall_opts = [
    cfg.StrOpt('firewall_driver',
               help='Firewall driver '
                    '(defaults to hypervisor specific iptables driver)'),
    cfg.BoolOpt('allow_same_net_traffic',
                default=True,
                help='Whether to allow network traffic from same network'),
]

CONF = cfg.CONF
CONF.register_opts(firewall_opts)
CONF.import_opt('use_ipv6', 'nova.netconf')


def load_driver(default, *args, **kwargs):
    fw_class = importutils.import_class(CONF.firewall_driver or default)
    return fw_class(*args, **kwargs)


class FirewallDriver(object):
    """Firewall Driver base class.

    Defines methods that any driver providing security groups
    and provider firewall functionality should implement.
    """
    def __init__(self, virtapi):
        self._virtapi = virtapi

    def prepare_instance_filter(self, instance, network_info):
        """Prepare filters for the instance.

        At this point, the instance isn't running yet.
        """
        raise NotImplementedError()

    def filter_defer_apply_on(self):
        """Defer application of IPTables rules."""
        pass

    def filter_defer_apply_off(self):
        """Turn off deferral of IPTables rules and apply the rules now."""
        pass

    def unfilter_instance(self, instance, network_info):
        """Stop filtering instance."""
        raise NotImplementedError()

    def apply_instance_filter(self, instance, network_info):
        """Apply instance filter.

        Once this method returns, the instance should be firewalled
        appropriately. This method should as far as possible be a
        no-op. It's vastly preferred to get everything set up in
        prepare_instance_filter.
        """
        raise NotImplementedError()

    def refresh_security_group_rules(self, security_group_id):
        """Refresh security group rules from data store.

        Gets called when a rule has been added to or removed from
        the security group.
        """
        raise NotImplementedError()

    def refresh_security_group_members(self, security_group_id):
        """Refresh security group members from data store.

        Gets called when an instance gets added to or removed from
        the security group.
        """
        raise NotImplementedError()

    def refresh_instance_security_rules(self, instance):
        """Refresh security group rules from data store.

        Gets called when an instance gets added to or removed from
        the security group the instance is a member of, or if the
        group gains or loses a rule.
        """
        raise NotImplementedError()

    def refresh_provider_fw_rules(self):
        """Refresh common rules for all hosts/instances from data store.

        Gets called when a rule has been added to or removed from
        the list of rules (via admin api).
        """
        raise NotImplementedError()

    def setup_basic_filtering(self, instance, network_info):
        """Create rules to block spoofing and allow dhcp.

        This gets called when spawning an instance, before
        :py:meth:`prepare_instance_filter`.
""" raise NotImplementedError() def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists.""" raise NotImplementedError() class IptablesFirewallDriver(FirewallDriver): """Driver which enforces security groups through iptables rules.""" def __init__(self, virtapi, **kwargs): super(IptablesFirewallDriver, self).__init__(virtapi) self.iptables = linux_net.iptables_manager self.instance_info = {} self.basically_filtered = False # Flags for DHCP request rule self.dhcp_create = False self.dhcp_created = False self.iptables.ipv4['filter'].add_chain('sg-fallback') self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') def setup_basic_filtering(self, instance, network_info): pass def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter.""" pass def filter_defer_apply_on(self): self.iptables.defer_apply_on() def filter_defer_apply_off(self): self.iptables.defer_apply_off() def unfilter_instance(self, instance, network_info): if self.instance_info.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() else: LOG.info(_('Attempted to unfilter instance which is not ' 'filtered'), instance=instance) def prepare_instance_filter(self, instance, network_info): self.instance_info[instance['id']] = (instance, network_info) ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info) self.add_filters_for_instance(instance, network_info, ipv4_rules, ipv6_rules) LOG.debug(_('Filters added to instance'), instance=instance) self.refresh_provider_fw_rules() LOG.debug(_('Provider Firewall Rules refreshed'), instance=instance) # Ensure that DHCP request rule is updated if necessary if (self.dhcp_create and not self.dhcp_created): self.iptables.ipv4['filter'].add_rule( 'INPUT', '-s 0.0.0.0/32 -d 255.255.255.255/32 ' '-p udp -m udp --sport 68 --dport 67 -j ACCEPT') self.iptables.ipv4['filter'].add_rule( 'FORWARD', '-s 0.0.0.0/32 -d 255.255.255.255/32 ' '-p udp -m udp --sport 68 --dport 67 -j ACCEPT') self.dhcp_created = True self.iptables.apply() def _create_filter(self, ips, chain_name): return ['-d %s -j $%s' % (ip, chain_name) for ip in ips] def _get_subnets(self, network_info, version): subnets = [] for vif in network_info: if 'network' in vif and 'subnets' in vif['network']: for subnet in vif['network']['subnets']: if subnet['version'] == version: subnets.append(subnet) return subnets def _filters_for_instance(self, chain_name, network_info): """Creates a rule corresponding to each ip that defines a jump to the corresponding instance - chain for all the traffic destined to that ip. 
""" v4_subnets = self._get_subnets(network_info, 4) v6_subnets = self._get_subnets(network_info, 6) ips_v4 = [ip['address'] for subnet in v4_subnets for ip in subnet['ips']] ipv4_rules = self._create_filter(ips_v4, chain_name) ipv6_rules = ips_v6 = [] if CONF.use_ipv6: if v6_subnets: ips_v6 = [ip['address'] for subnet in v6_subnets for ip in subnet['ips']] ipv6_rules = self._create_filter(ips_v6, chain_name) return ipv4_rules, ipv6_rules def _add_filters(self, chain_name, ipv4_rules, ipv6_rules): for rule in ipv4_rules: self.iptables.ipv4['filter'].add_rule(chain_name, rule) if CONF.use_ipv6: for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule(chain_name, rule) def add_filters_for_instance(self, instance, network_info, inst_ipv4_rules, inst_ipv6_rules): chain_name = self._instance_chain_name(instance) if CONF.use_ipv6: self.iptables.ipv6['filter'].add_chain(chain_name) self.iptables.ipv4['filter'].add_chain(chain_name) ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name, network_info) self._add_filters('local', ipv4_rules, ipv6_rules) self._add_filters(chain_name, inst_ipv4_rules, inst_ipv6_rules) def remove_filters_for_instance(self, instance): chain_name = self._instance_chain_name(instance) self.iptables.ipv4['filter'].remove_chain(chain_name) if CONF.use_ipv6: self.iptables.ipv6['filter'].remove_chain(chain_name) def _instance_chain_name(self, instance): return 'inst-%s' % (instance['id'],) def _do_basic_rules(self, ipv4_rules, ipv6_rules, network_info): # Always drop invalid packets ipv4_rules += ['-m state --state ' 'INVALID -j DROP'] ipv6_rules += ['-m state --state ' 'INVALID -j DROP'] # Allow established connections ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] # Pass through provider-wide drops ipv4_rules += ['-j $provider'] ipv6_rules += ['-j $provider'] def _do_dhcp_rules(self, ipv4_rules, network_info): v4_subnets = self._get_subnets(network_info, 4) dhcp_servers = [subnet.get_meta('dhcp_server') for subnet in v4_subnets if subnet.get_meta('dhcp_server')] for dhcp_server in dhcp_servers: if dhcp_server: ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' '-j ACCEPT' % (dhcp_server,)) self.dhcp_create = True def _do_project_network_rules(self, ipv4_rules, ipv6_rules, network_info): v4_subnets = self._get_subnets(network_info, 4) v6_subnets = self._get_subnets(network_info, 6) cidrs = [subnet['cidr'] for subnet in v4_subnets] for cidr in cidrs: ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) if CONF.use_ipv6: cidrv6s = [subnet['cidr'] for subnet in v6_subnets] for cidrv6 in cidrv6s: ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,)) def _do_ra_rules(self, ipv6_rules, network_info): v6_subnets = self._get_subnets(network_info, 6) gateways_v6 = [subnet['gateway']['address'] for subnet in v6_subnets] for gateway_v6 in gateways_v6: ipv6_rules.append( '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) def _build_icmp_rule(self, rule, version): icmp_type = rule['from_port'] icmp_code = rule['to_port'] if icmp_type == -1: icmp_type_arg = None else: icmp_type_arg = '%s' % icmp_type if not icmp_code == -1: icmp_type_arg += '/%s' % icmp_code if icmp_type_arg: if version == 4: return ['-m', 'icmp', '--icmp-type', icmp_type_arg] elif version == 6: return ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg] # return empty list if icmp_type == -1 return [] def _build_tcp_udp_rule(self, rule, version): if rule['from_port'] == rule['to_port']: return ['--dport', '%s' % (rule['from_port'],)] else: 
return ['-m', 'multiport', '--dports', '%s:%s' % (rule['from_port'], rule['to_port'])] def instance_rules(self, instance, network_info): ctxt = context.get_admin_context() if isinstance(instance, dict): # NOTE(danms): allow old-world instance objects from # unconverted callers; all we need is instance.uuid below instance = instance_obj.Instance._from_db_object( ctxt, instance_obj.Instance(), instance, []) ipv4_rules = [] ipv6_rules = [] # Initialize with basic rules self._do_basic_rules(ipv4_rules, ipv6_rules, network_info) # Set up rules to allow traffic to/from DHCP server self._do_dhcp_rules(ipv4_rules, network_info) #Allow project network traffic if CONF.allow_same_net_traffic: self._do_project_network_rules(ipv4_rules, ipv6_rules, network_info) # We wrap these in CONF.use_ipv6 because they might cause # a DB lookup. The other ones are just list operations, so # they're not worth the clutter. if CONF.use_ipv6: # Allow RA responses self._do_ra_rules(ipv6_rules, network_info) security_groups = security_group_obj.SecurityGroupList.get_by_instance( ctxt, instance) # then, security group chains and rules for security_group in security_groups: rules_cls = security_group_rule_obj.SecurityGroupRuleList rules = rules_cls.get_by_security_group(ctxt, security_group) for rule in rules: LOG.debug(_('Adding security group rule: %r'), rule, instance=instance) if not rule['cidr']: version = 4 else: version = netutils.get_ip_version(rule['cidr']) if version == 4: fw_rules = ipv4_rules else: fw_rules = ipv6_rules protocol = rule['protocol'] if protocol: protocol = rule['protocol'].lower() if version == 6 and protocol == 'icmp': protocol = 'icmpv6' args = ['-j ACCEPT'] if protocol: args += ['-p', protocol] if protocol in ['udp', 'tcp']: args += self._build_tcp_udp_rule(rule, version) elif protocol == 'icmp': args += self._build_icmp_rule(rule, version) if rule['cidr']: LOG.debug('Using cidr %r', rule['cidr'], instance=instance) args += ['-s', str(rule['cidr'])] fw_rules += [' '.join(args)] else: if rule['grantee_group']: insts = ( instance_obj.InstanceList.get_by_security_group( ctxt, rule['grantee_group'])) for instance in insts: if instance['info_cache']['deleted']: LOG.debug('ignoring deleted cache') continue nw_info = compute_utils.get_nw_info_for_instance( instance) ips = [ip['address'] for ip in nw_info.fixed_ips() if ip['version'] == version] LOG.debug('ips: %r', ips, instance=instance) for ip in ips: subrule = args + ['-s %s' % ip] fw_rules += [' '.join(subrule)] LOG.debug('Using fw_rules: %r', fw_rules, instance=instance) ipv4_rules += ['-j $sg-fallback'] ipv6_rules += ['-j $sg-fallback'] return ipv4_rules, ipv6_rules def instance_filter_exists(self, instance, network_info): pass def refresh_security_group_members(self, security_group): self.do_refresh_security_group_rules(security_group) self.iptables.apply() def refresh_security_group_rules(self, security_group): self.do_refresh_security_group_rules(security_group) self.iptables.apply() def refresh_instance_security_rules(self, instance): self.do_refresh_instance_rules(instance) self.iptables.apply() @utils.synchronized('iptables', external=True) def _inner_do_refresh_rules(self, instance, network_info, ipv4_rules, ipv6_rules): chain_name = self._instance_chain_name(instance) if not self.iptables.ipv4['filter'].has_chain(chain_name): LOG.info( _('instance chain %s disappeared during refresh, ' 'skipping') % chain_name, instance=instance) return self.remove_filters_for_instance(instance) self.add_filters_for_instance(instance, network_info, 
ipv4_rules, ipv6_rules) def do_refresh_security_group_rules(self, security_group): id_list = self.instance_info.keys() for instance_id in id_list: try: instance, network_info = self.instance_info[instance_id] except KeyError: # NOTE(danms): instance cache must have been modified, # ignore this deleted instance and move on continue ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info) self._inner_do_refresh_rules(instance, network_info, ipv4_rules, ipv6_rules) def do_refresh_instance_rules(self, instance): _instance, network_info = self.instance_info[instance['id']] ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info) self._inner_do_refresh_rules(instance, network_info, ipv4_rules, ipv6_rules) def refresh_provider_fw_rules(self): """See :class:`FirewallDriver` docs.""" self._do_refresh_provider_fw_rules() self.iptables.apply() @utils.synchronized('iptables', external=True) def _do_refresh_provider_fw_rules(self): """Internal, synchronized version of refresh_provider_fw_rules.""" self._purge_provider_fw_rules() self._build_provider_fw_rules() def _purge_provider_fw_rules(self): """Remove all rules from the provider chains.""" self.iptables.ipv4['filter'].empty_chain('provider') if CONF.use_ipv6: self.iptables.ipv6['filter'].empty_chain('provider') def _build_provider_fw_rules(self): """Create all rules for the provider IP DROPs.""" self.iptables.ipv4['filter'].add_chain('provider') if CONF.use_ipv6: self.iptables.ipv6['filter'].add_chain('provider') ipv4_rules, ipv6_rules = self._provider_rules() for rule in ipv4_rules: self.iptables.ipv4['filter'].add_rule('provider', rule) if CONF.use_ipv6: for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule('provider', rule) def _provider_rules(self): """Generate a list of rules from provider for IP4 & IP6.""" ctxt = context.get_admin_context() ipv4_rules = [] ipv6_rules = [] rules = self._virtapi.provider_fw_rule_get_all(ctxt) for rule in rules: LOG.debug(_('Adding provider rule: %s'), rule['cidr']) version = netutils.get_ip_version(rule['cidr']) if version == 4: fw_rules = ipv4_rules else: fw_rules = ipv6_rules protocol = rule['protocol'] if version == 6 and protocol == 'icmp': protocol = 'icmpv6' args = ['-p', protocol, '-s', rule['cidr']] if protocol in ['udp', 'tcp']: if rule['from_port'] == rule['to_port']: args += ['--dport', '%s' % (rule['from_port'],)] else: args += ['-m', 'multiport', '--dports', '%s:%s' % (rule['from_port'], rule['to_port'])] elif protocol == 'icmp': icmp_type = rule['from_port'] icmp_code = rule['to_port'] if icmp_type == -1: icmp_type_arg = None else: icmp_type_arg = '%s' % icmp_type if not icmp_code == -1: icmp_type_arg += '/%s' % icmp_code if icmp_type_arg: if version == 4: args += ['-m', 'icmp', '--icmp-type', icmp_type_arg] elif version == 6: args += ['-m', 'icmp6', '--icmpv6-type', icmp_type_arg] args += ['-j DROP'] fw_rules += [' '.join(args)] return ipv4_rules, ipv6_rules class NoopFirewallDriver(object): """Firewall driver which just provides No-op methods.""" def __init__(self, *args, **kwargs): pass def _noop(self, *args, **kwargs): pass def __getattr__(self, key): return self._noop def instance_filter_exists(self, instance, network_info): return True nova-2014.1.5/nova/virt/event.py0000664000567000056700000000644712540642544017512 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Asynchronous event notifications from virtualization drivers.

This module defines a set of classes representing data for
various asynchronous events that can occur in a virtualization
driver.
"""

import time

from nova.openstack.common.gettextutils import _

EVENT_LIFECYCLE_STARTED = 0
EVENT_LIFECYCLE_STOPPED = 1
EVENT_LIFECYCLE_PAUSED = 2
EVENT_LIFECYCLE_RESUMED = 3

NAMES = {
    EVENT_LIFECYCLE_STARTED: _('Started'),
    EVENT_LIFECYCLE_STOPPED: _('Stopped'),
    EVENT_LIFECYCLE_PAUSED: _('Paused'),
    EVENT_LIFECYCLE_RESUMED: _('Resumed')
}


class Event(object):
    """Base class for all events emitted by a hypervisor.

    All events emitted by a virtualization driver are
    subclasses of this base object. The only generic
    information recorded in the base class is a timestamp
    indicating when the event first occurred. The timestamp
    is recorded as fractional seconds since the UNIX epoch.
    """

    def __init__(self, timestamp=None):
        if timestamp is None:
            self.timestamp = time.time()
        else:
            self.timestamp = timestamp

    def get_timestamp(self):
        return self.timestamp

    def __repr__(self):
        return "<%s: %s>" % (
            self.__class__.__name__,
            self.timestamp)


class InstanceEvent(Event):
    """Base class for all instance events.

    All events emitted by a virtualization driver which are
    associated with a virtual domain instance are subclasses
    of this base object. This object records the UUID
    associated with the instance.
    """

    def __init__(self, uuid, timestamp=None):
        super(InstanceEvent, self).__init__(timestamp)
        self.uuid = uuid

    def get_instance_uuid(self):
        return self.uuid

    def __repr__(self):
        return "<%s: %s, %s>" % (
            self.__class__.__name__,
            self.timestamp,
            self.uuid)


class LifecycleEvent(InstanceEvent):
    """Class for instance lifecycle state change events.

    When a virtual domain instance lifecycle state changes,
    events of this class are emitted. The EVENT_LIFECYCLE_XX
    constants define which lifecycle change occurred. This
    event allows detection of an instance starting/stopping
    without the need for polling.
    """

    def __init__(self, uuid, transition, timestamp=None):
        super(LifecycleEvent, self).__init__(uuid, timestamp)
        self.transition = transition

    def get_transition(self):
        return self.transition

    def get_name(self):
        return NAMES.get(self.transition, _('Unknown'))

    def __repr__(self):
        return "<%s: %s, %s => %s>" % (
            self.__class__.__name__,
            self.timestamp,
            self.uuid,
            self.get_name())
nova-2014.1.5/nova/virt/storage_users.py0000664000567000056700000001075212540642544021250 0ustar jenkinsjenkins00000000000000# Copyright 2012 Michael Still and Canonical Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
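# NOTE(editor): illustrative sketch only, not part of the original module.
# The hypothetical function below shows how a virt driver would construct
# one of the lifecycle events defined in nova/virt/event.py above; the UUID
# is a placeholder.
def _example_lifecycle_event():
    """Build a sample LifecycleEvent and return its display name (sketch)."""
    from nova.virt import event as virtevent
    ev = virtevent.LifecycleEvent('00000000-0000-0000-0000-000000000000',
                                  virtevent.EVENT_LIFECYCLE_STOPPED)
    return ev.get_name()  # -> 'Stopped' (translated)
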
import json import os import time from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import utils from oslo.config import cfg LOG = logging.getLogger(__name__) CONF = cfg.CONF TWENTY_FOUR_HOURS = 3600 * 24 # NOTE(morganfainberg): Due to circular import dependencies, the use of the # CONF.instances_path needs to be wrapped so that it can be resolved at the # appropriate time. Because compute.manager imports this file, we end up in # a rather ugly dependency loop without moving this into a wrapped function. # This issue mostly stems from the use of a decorator for the lock # synchronize and the implications of how decorators wrap the wrapped function # or method. If this needs to be used outside of compute.manager, it should # be refactored to eliminate this circular dependency loop. def register_storage_use(storage_path, hostname): """Identify the id of this instance storage.""" # NOTE(morganfainberg): config option import is avoided here since it is # explicitly imported from compute.manager and may cause issues with # defining options after config has been processed with the # wrapped-function style used here. LOCK_PATH = os.path.join(CONF.instances_path, 'locks') @utils.synchronized('storage-registry-lock', external=True, lock_path=LOCK_PATH) def do_register_storage_use(storage_path, hostname): # NOTE(mikal): this is required to determine if the instance storage is # shared, which is something that the image cache manager needs to # know. I can imagine other uses as well though. d = {} id_path = os.path.join(storage_path, 'compute_nodes') if os.path.exists(id_path): with open(id_path) as f: try: d = json.loads(f.read()) except ValueError: LOG.warning(_("Cannot decode JSON from %(id_path)s"), {"id_path": id_path}) d[hostname] = time.time() with open(id_path, 'w') as f: f.write(json.dumps(d)) return do_register_storage_use(storage_path, hostname) # NOTE(morganfainberg): Due to circular import dependencies, the use of the # CONF.instances_path needs to be wrapped so that it can be resolved at the # appropriate time. Because compute.manager imports this file, we end up in # a rather ugly dependency loop without moving this into a wrapped function. # This issue mostly stems from the use of a decorator for the lock # synchronize and the implications of how decorators wrap the wrapped function # or method. If this needs to be used outside of compute.manager, it should # be refactored to eliminate this circular dependency loop. def get_storage_users(storage_path): """Get a list of all the users of this storage path.""" # NOTE(morganfainberg): config option import is avoided here since it is # explicitly imported from compute.manager and may cause issues with # defining options after config has been processed with the # wrapped-function style used here. 
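    # NOTE(editor): added comment -- the registry read below is a JSON dict
    # of {hostname: last-seen unix timestamp} kept in
    # <storage_path>/compute_nodes; entries older than 24 hours are ignored
    # when building the returned user list.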
LOCK_PATH = os.path.join(CONF.instances_path, 'locks') @utils.synchronized('storage-registry-lock', external=True, lock_path=LOCK_PATH) def do_get_storage_users(storage_path): d = {} id_path = os.path.join(storage_path, 'compute_nodes') if os.path.exists(id_path): with open(id_path) as f: try: d = json.loads(f.read()) except ValueError: LOG.warning(_("Cannot decode JSON from %(id_path)s"), {"id_path": id_path}) recent_users = [] for node in d: if time.time() - d[node] < TWENTY_FOUR_HOURS: recent_users.append(node) return recent_users return do_get_storage_users(storage_path) nova-2014.1.5/nova/virt/interfaces.template0000664000567000056700000000142412540642544021665 0ustar jenkinsjenkins00000000000000# Injected by Nova on instance boot # # This file describes the network interfaces available on your system # and how to activate them. For more information, see interfaces(5). # The loopback network interface auto lo iface lo inet loopback {% for ifc in interfaces -%} auto {{ ifc.name }} iface {{ ifc.name }} inet static address {{ ifc.address }} netmask {{ ifc.netmask }} broadcast {{ ifc.broadcast }} {%- if ifc.gateway %} gateway {{ ifc.gateway }} {%- endif %} {%- if ifc.dns %} dns-nameservers {{ ifc.dns }} {%- endif %} {% if use_ipv6 -%} iface {{ ifc.name }} inet6 static address {{ ifc.address_v6 }} netmask {{ ifc.netmask_v6 }} {%- if ifc.gateway_v6 %} gateway {{ ifc.gateway_v6 }} {%- endif %} {%- endif %} {%- endfor %} nova-2014.1.5/nova/virt/netutils.py0000664000567000056700000001167412540642544020236 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright (c) 2010 Citrix Systems, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Network-related utilities for supporting libvirt connection code.""" import os import jinja2 import netaddr from oslo.config import cfg from nova.network import model CONF = cfg.CONF CONF.import_opt('use_ipv6', 'nova.netconf') CONF.import_opt('injected_network_template', 'nova.virt.disk.api') def get_net_and_mask(cidr): net = netaddr.IPNetwork(cidr) return str(net.ip), str(net.netmask) def get_net_and_prefixlen(cidr): net = netaddr.IPNetwork(cidr) return str(net.ip), str(net._prefixlen) def get_ip_version(cidr): net = netaddr.IPNetwork(cidr) return int(net.version) def _get_first_network(network, version): # Using a generator expression with a next() call for the first element # of a list since we don't want to evaluate the whole list as we can # have a lot of subnets try: return (i for i in network['subnets'] if i['version'] == version).next() except StopIteration: pass def get_injected_network_template(network_info, use_ipv6=CONF.use_ipv6, template=CONF.injected_network_template): """Returns a rendered network template for the given network_info. 
:param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param use_ipv6: If False, do not return IPv6 template information even if an IPv6 subnet is present in network_info. :param template: Path to the interfaces template file. """ if not (network_info and template): return nets = [] ifc_num = -1 ipv6_is_available = False for vif in network_info: if not vif['network'] or not vif['network']['subnets']: continue network = vif['network'] # NOTE(bnemec): The template only supports a single subnet per # interface and I'm not sure how/if that can be fixed, so this # code only takes the first subnet of the appropriate type. subnet_v4 = _get_first_network(network, 4) subnet_v6 = _get_first_network(network, 6) ifc_num += 1 if not network.get_meta('injected'): continue address = None netmask = None gateway = '' broadcast = None dns = None if subnet_v4: if subnet_v4.get_meta('dhcp_server') is not None: continue if subnet_v4['ips']: ip = subnet_v4['ips'][0] address = ip['address'] netmask = model.get_netmask(ip, subnet_v4) if subnet_v4['gateway']: gateway = subnet_v4['gateway']['address'] broadcast = str(subnet_v4.as_netaddr().broadcast) dns = ' '.join([i['address'] for i in subnet_v4['dns']]) address_v6 = None gateway_v6 = '' netmask_v6 = None have_ipv6 = (use_ipv6 and subnet_v6) if have_ipv6: if subnet_v6.get_meta('dhcp_server') is not None: continue if subnet_v6['ips']: ipv6_is_available = True ip_v6 = subnet_v6['ips'][0] address_v6 = ip_v6['address'] netmask_v6 = model.get_netmask(ip_v6, subnet_v6) if subnet_v6['gateway']: gateway_v6 = subnet_v6['gateway']['address'] net_info = {'name': 'eth%d' % ifc_num, 'address': address, 'netmask': netmask, 'gateway': gateway, 'broadcast': broadcast, 'dns': dns, 'address_v6': address_v6, 'gateway_v6': gateway_v6, 'netmask_v6': netmask_v6, } nets.append(net_info) if not nets: return return build_template(template, nets, ipv6_is_available) def build_template(template, nets, ipv6_is_available): tmpl_path, tmpl_file = os.path.split(CONF.injected_network_template) env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_path)) template = env.get_template(tmpl_file) return template.render({'interfaces': nets, 'use_ipv6': ipv6_is_available}) nova-2014.1.5/nova/virt/block_device.py0000664000567000056700000003702212540642544020773 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
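# NOTE(editor): illustrative sketch only, not part of the original module.
# For reference, build_template() in netutils.py above renders the
# interfaces.template file shown earlier; for a single injected IPv4 subnet
# the output is roughly (all addresses hypothetical):
#
#     auto eth0
#     iface eth0 inet static
#         address 192.168.1.10
#         netmask 255.255.255.0
#         broadcast 192.168.1.255
#         gateway 192.168.1.1
#         dns-nameservers 192.168.1.1
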
import functools
import operator

from nova import block_device
from nova.objects import block_device as block_device_obj
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common.gettextutils import _LI
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.volume import encryptors

LOG = logging.getLogger(__name__)


class _NotTransformable(Exception):
    pass


class _InvalidType(_NotTransformable):
    pass


class _NoLegacy(Exception):
    pass


def update_db(method):
    @functools.wraps(method)
    def wrapped(obj, context, *args, **kwargs):
        ret_val = method(obj, context, *args, **kwargs)
        obj.save(context)
        return ret_val
    return wrapped


class DriverBlockDevice(dict):
    """A dict subclass that represents block devices used by the virt layer.

    Uses block device objects internally to do the database access.

    _fields and _legacy_fields class attributes present a set of fields
    that are expected on a certain DriverBlockDevice type. We may have more
    legacy versions in the future.

    If an attribute access is attempted for a name that is found in the
    _proxy_as_attr set, it will be proxied to the underlying object. This
    allows us to access stuff that is not part of the data model that all
    drivers understand.

    The save() method allows us to update the database using the underlying
    object.

    _update_on_save class attribute dictionary keeps the following mapping:
        {'object field name': 'driver dict field name (or None if same)'}
    These fields will be updated on the internal object, from the values in
    the dict, before the actual database update is done.
    """

    _fields = set()
    _legacy_fields = set()
    _proxy_as_attr = set()
    _update_on_save = {'disk_bus': None,
                       'device_name': None,
                       'device_type': None}

    def __init__(self, bdm):
        # TODO(ndipanov): Remove this check when we have all the rpc methods
        # use objects for block devices.
        if isinstance(bdm, block_device_obj.BlockDeviceMapping):
            self.__dict__['_bdm_obj'] = bdm
        else:
            self.__dict__['_bdm_obj'] = block_device_obj.BlockDeviceMapping()
            self._bdm_obj.update(block_device.BlockDeviceDict(bdm))
            self._bdm_obj.obj_reset_changes()

        if self._bdm_obj.no_device:
            raise _NotTransformable()

        self.update(dict((field, None) for field in self._fields))
        self._transform()

    def __getattr__(self, name):
        if name in self._proxy_as_attr:
            return getattr(self._bdm_obj, name)
        else:
            raise AttributeError("Cannot access %s on DriverBlockDevice "
                                 "class" % name)

    def __setattr__(self, name, value):
        if name in self._proxy_as_attr:
            return setattr(self._bdm_obj, name, value)
        else:
            raise AttributeError("Cannot access %s on DriverBlockDevice "
                                 "class" % name)

    def _transform(self):
        """Transform bdm to the format that is passed to drivers."""
        raise NotImplementedError()

    def legacy(self):
        """Basic legacy transformation.

        Basic method will just drop the fields that are not in
        _legacy_fields set. Override this in subclass if needed.
        """
        return dict((key, self.get(key)) for key in self._legacy_fields)

    def attach(self, **kwargs):
        """Make the device available to be used by VMs.

        To be overridden in subclasses with the connecting logic for
        the type of device the subclass represents.
""" raise NotImplementedError() def save(self, context): for attr_name, key_name in self._update_on_save.iteritems(): setattr(self._bdm_obj, attr_name, self[key_name or attr_name]) self._bdm_obj.save(context) class DriverSwapBlockDevice(DriverBlockDevice): _fields = set(['device_name', 'swap_size', 'disk_bus']) _legacy_fields = _fields - set(['disk_bus']) _update_on_save = {'disk_bus': None, 'device_name': None} def _transform(self): if not block_device.new_format_is_swap(self._bdm_obj): raise _InvalidType self.update({ 'device_name': self._bdm_obj.device_name, 'swap_size': self._bdm_obj.volume_size or 0, 'disk_bus': self._bdm_obj.disk_bus }) class DriverEphemeralBlockDevice(DriverBlockDevice): _new_only_fields = set(['disk_bus', 'device_type', 'guest_format']) _fields = set(['device_name', 'size']) | _new_only_fields _legacy_fields = (_fields - _new_only_fields | set(['num', 'virtual_name'])) def _transform(self): if not block_device.new_format_is_ephemeral(self._bdm_obj): raise _InvalidType self.update({ 'device_name': self._bdm_obj.device_name, 'size': self._bdm_obj.volume_size or 0, 'disk_bus': self._bdm_obj.disk_bus, 'device_type': self._bdm_obj.device_type, 'guest_format': self._bdm_obj.guest_format }) def legacy(self, num=0): legacy_bdm = super(DriverEphemeralBlockDevice, self).legacy() legacy_bdm['num'] = num legacy_bdm['virtual_name'] = 'ephemeral' + str(num) return legacy_bdm class DriverVolumeBlockDevice(DriverBlockDevice): _legacy_fields = set(['connection_info', 'mount_device', 'delete_on_termination']) _new_fields = set(['guest_format', 'device_type', 'disk_bus', 'boot_index']) _fields = _legacy_fields | _new_fields _valid_source = 'volume' _valid_destination = 'volume' _proxy_as_attr = set(['volume_size', 'volume_id']) _update_on_save = {'disk_bus': None, 'device_name': 'mount_device', 'device_type': None} def _transform(self): if (not self._bdm_obj.source_type == self._valid_source or not self._bdm_obj.destination_type == self._valid_destination): raise _InvalidType self.update( dict((k, v) for k, v in self._bdm_obj.iteritems() if k in self._new_fields | set(['delete_on_termination'])) ) self['mount_device'] = self._bdm_obj.device_name try: self['connection_info'] = jsonutils.loads( self._bdm_obj.connection_info) except TypeError: self['connection_info'] = None def _preserve_multipath_id(self, connection_info): if self['connection_info'] and 'data' in self['connection_info']: if 'multipath_id' in self['connection_info']['data']: connection_info['data']['multipath_id'] =\ self['connection_info']['data']['multipath_id'] LOG.info(_LI('preserve multipath_id %s'), connection_info['data']['multipath_id']) @update_db def attach(self, context, instance, volume_api, virt_driver, do_check_attach=True, do_driver_attach=False): volume = volume_api.get(context, self.volume_id) if do_check_attach: volume_api.check_attach(context, volume, instance=instance) volume_id = volume['id'] context = context.elevated() connector = virt_driver.get_volume_connector(instance) connection_info = volume_api.initialize_connection(context, volume_id, connector) if 'serial' not in connection_info: connection_info['serial'] = self.volume_id self._preserve_multipath_id(connection_info) # If do_driver_attach is False, we will attach a volume to an instance # at boot time. So actual attach is done by instance creation code. 
if do_driver_attach: encryption = encryptors.get_encryption_metadata( context, volume_api, volume_id, connection_info) try: virt_driver.attach_volume( context, connection_info, instance, self['mount_device'], disk_bus=self['disk_bus'], device_type=self['device_type'], encryption=encryption) except Exception: # pylint: disable=W0702 with excutils.save_and_reraise_exception(): LOG.exception(_("Driver failed to attach volume " "%(volume_id)s at %(mountpoint)s"), {'volume_id': volume_id, 'mountpoint': self['mount_device']}, context=context, instance=instance) volume_api.terminate_connection(context, volume_id, connector) self['connection_info'] = connection_info mode = 'rw' if 'data' in connection_info: mode = connection_info['data'].get('access_mode', 'rw') volume_api.attach(context, volume_id, instance['uuid'], self['mount_device'], mode=mode) @update_db def refresh_connection_info(self, context, instance, volume_api, virt_driver): # NOTE (ndipanov): A no-op if there is no connection info already if not self['connection_info']: return connector = virt_driver.get_volume_connector(instance) connection_info = volume_api.initialize_connection(context, self.volume_id, connector) if 'serial' not in connection_info: connection_info['serial'] = self.volume_id self._preserve_multipath_id(connection_info) self['connection_info'] = connection_info def save(self, context): # NOTE(ndipanov): we might want to generalize this by adding it to the # _update_on_save and adding a transformation function. try: self._bdm_obj.connection_info = jsonutils.dumps( self.get('connection_info')) except TypeError: pass super(DriverVolumeBlockDevice, self).save(context) class DriverSnapshotBlockDevice(DriverVolumeBlockDevice): _valid_source = 'snapshot' _proxy_as_attr = set(['volume_size', 'volume_id', 'snapshot_id']) def attach(self, context, instance, volume_api, virt_driver, wait_func=None): if not self.volume_id: snapshot = volume_api.get_snapshot(context, self.snapshot_id) vol = volume_api.create(context, self.volume_size, '', '', snapshot) if wait_func: wait_func(context, vol['id']) self.volume_id = vol['id'] # Call the volume attach now super(DriverSnapshotBlockDevice, self).attach(context, instance, volume_api, virt_driver) class DriverImageBlockDevice(DriverVolumeBlockDevice): _valid_source = 'image' _proxy_as_attr = set(['volume_size', 'volume_id', 'image_id']) def attach(self, context, instance, volume_api, virt_driver, wait_func=None): if not self.volume_id: vol = volume_api.create(context, self.volume_size, '', '', image_id=self.image_id) if wait_func: wait_func(context, vol['id']) self.volume_id = vol['id'] super(DriverImageBlockDevice, self).attach(context, instance, volume_api, virt_driver) def _convert_block_devices(device_type, block_device_mapping): def _is_transformable(bdm): try: device_type(bdm) except _NotTransformable: return False return True return [device_type(bdm) for bdm in block_device_mapping if _is_transformable(bdm)] convert_swap = functools.partial(_convert_block_devices, DriverSwapBlockDevice) convert_ephemerals = functools.partial(_convert_block_devices, DriverEphemeralBlockDevice) convert_volumes = functools.partial(_convert_block_devices, DriverVolumeBlockDevice) convert_snapshots = functools.partial(_convert_block_devices, DriverSnapshotBlockDevice) convert_images = functools.partial(_convert_block_devices, DriverImageBlockDevice) def attach_block_devices(block_device_mapping, *attach_args, **attach_kwargs): def _log_and_attach(bdm): context = attach_args[0] instance = attach_args[1] 
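        # NOTE(editor): added comment -- attach_args mirrors the signature of
        # DriverBlockDevice.attach(), so attach_args[0] is the request
        # context and attach_args[1] the instance.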
LOG.audit(_('Booting with volume %(volume_id)s at %(mountpoint)s'), {'volume_id': bdm.volume_id, 'mountpoint': bdm['mount_device']}, context=context, instance=instance) bdm.attach(*attach_args, **attach_kwargs) map(_log_and_attach, block_device_mapping) return block_device_mapping def refresh_conn_infos(block_device_mapping, *refresh_args, **refresh_kwargs): map(operator.methodcaller('refresh_connection_info', *refresh_args, **refresh_kwargs), block_device_mapping) return block_device_mapping def legacy_block_devices(block_device_mapping): def _has_legacy(bdm): try: bdm.legacy() except _NoLegacy: return False return True bdms = [bdm.legacy() for bdm in block_device_mapping if _has_legacy(bdm)] # Re-enumerate ephemeral devices if all(isinstance(bdm, DriverEphemeralBlockDevice) for bdm in block_device_mapping): for i, dev in enumerate(bdms): dev['virtual_name'] = dev['virtual_name'][:-1] + str(i) dev['num'] = i return bdms def get_swap(transformed_list): """Get the swap device out of the list context. The block_device_info needs swap to be a single device, not a list - otherwise this is a no-op. """ if not all(isinstance(device, DriverSwapBlockDevice) or 'swap_size' in device for device in transformed_list): return transformed_list try: return transformed_list.pop() except IndexError: return None _IMPLEMENTED_CLASSES = (DriverSwapBlockDevice, DriverEphemeralBlockDevice, DriverVolumeBlockDevice, DriverSnapshotBlockDevice, DriverImageBlockDevice) def is_implemented(bdm): for cls in _IMPLEMENTED_CLASSES: try: cls(bdm) return True except _NotTransformable: pass return False nova-2014.1.5/nova/scheduler/0000775000567000056700000000000012540643452016775 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/scheduler/scheduler_options.py0000664000567000056700000000706112540642543023104 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ import datetime import json import os from oslo.config import cfg from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils scheduler_json_config_location_opt = cfg.StrOpt( 'scheduler_json_config_location', default='', help='Absolute path to scheduler configuration JSON file.') CONF = cfg.CONF CONF.register_opt(scheduler_json_config_location_opt) LOG = logging.getLogger(__name__) class SchedulerOptions(object): """SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. 
""" def __init__(self): super(SchedulerOptions, self).__init__() self.data = {} self.last_modified = None self.last_checked = None def _get_file_handle(self, filename): """Get file handle. Broken out for testing.""" return open(filename) def _get_file_timestamp(self, filename): """Get the last modified datetime. Broken out for testing.""" try: return os.path.getmtime(filename) except os.error as e: with excutils.save_and_reraise_exception(): LOG.exception(_("Could not stat scheduler options file " "%(filename)s: '%(e)s'"), {'filename': filename, 'e': e}) def _load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: return json.load(handle) except ValueError as e: LOG.exception(_("Could not decode scheduler options: '%s'"), e) return {} def _get_time_now(self): """Get current UTC. Broken out for testing.""" return timeutils.utcnow() def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" if not filename: filename = CONF.scheduler_json_config_location if not filename: return self.data if self.last_checked: now = self._get_time_now() if now - self.last_checked < datetime.timedelta(minutes=5): return self.data last_modified = self._get_file_timestamp(filename) if (not last_modified or not self.last_modified or last_modified > self.last_modified): self.data = self._load_file(self._get_file_handle(filename)) self.last_modified = last_modified if not self.data: self.data = {} return self.data nova-2014.1.5/nova/scheduler/weights/0000775000567000056700000000000012540643452020447 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/scheduler/weights/ram.py0000664000567000056700000000311612540642543021601 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ RAM Weigher. Weigh hosts by their RAM usage. The default is to spread instances across all hosts evenly. If you prefer stacking, you can set the 'ram_weight_multiplier' option to a negative number and the weighing has the opposite effect of the default. """ from oslo.config import cfg from nova.scheduler import weights ram_weight_opts = [ cfg.FloatOpt('ram_weight_multiplier', default=1.0, help='Multiplier used for weighing ram. Negative ' 'numbers mean to stack vs spread.'), ] CONF = cfg.CONF CONF.register_opts(ram_weight_opts) class RAMWeigher(weights.BaseHostWeigher): minval = 0 def weight_multiplier(self): """Override the weight multiplier.""" return CONF.ram_weight_multiplier def _weigh_object(self, host_state, weight_properties): """Higher weights win. We want spreading to be the default.""" return host_state.free_ram_mb nova-2014.1.5/nova/scheduler/weights/metrics.py0000664000567000056700000001054012540642543022467 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Metrics Weigher.  Weigh hosts by their metrics.

This weigher can compute the weight based on the compute node host's various
metrics. The metrics to be weighed and their weighing ratios are specified
in the configuration file as follows:

    [metrics]
    weight_setting = name1=1.0, name2=-1.0

The final weight would be name1.value * 1.0 + name2.value * -1.0.
"""

from oslo.config import cfg

from nova import exception
from nova.scheduler import utils
from nova.scheduler import weights

metrics_weight_opts = [
    cfg.FloatOpt('weight_multiplier',
                 default=1.0,
                 help='Multiplier used for weighing metrics.'),
    cfg.ListOpt('weight_setting',
                default=[],
                help='How the metrics are going to be weighed. This '
                     'should be in the form of "<name1>=<ratio1>, '
                     '<name2>=<ratio2>, ...", where <nameX> is one '
                     'of the metrics to be weighed, and <ratioX> is '
                     'the corresponding ratio. So for "name1=1.0, '
                     'name2=-1.0" the final weight would be '
                     'name1.value * 1.0 + name2.value * -1.0.'),
    cfg.BoolOpt('required',
                default=True,
                help='How to treat the unavailable metrics. When a '
                     'metric is NOT available for a host, if it is set '
                     'to be True, it would raise an exception, so it '
                     'is recommended to use the scheduler filter '
                     'MetricsFilter to filter out those hosts. If it is '
                     'set to be False, the unavailable metric would be '
                     'treated as a negative factor in the weighing '
                     'process, and the returned value would be set by '
                     'the option weight_of_unavailable.'),
    cfg.FloatOpt('weight_of_unavailable',
                 default=float(-10000.0),
                 help='The final weight value to be returned if '
                      'required is set to False and any one of the '
                      'metrics set by weight_setting is unavailable.'),
]

CONF = cfg.CONF
CONF.register_opts(metrics_weight_opts, group='metrics')


class MetricsWeigher(weights.BaseHostWeigher):
    def __init__(self):
        self._parse_setting()

    def _parse_setting(self):
        self.setting = utils.parse_options(CONF.metrics.weight_setting,
                                           sep='=',
                                           converter=float,
                                           name="metrics.weight_setting")

    def weight_multiplier(self):
        """Override the weight multiplier."""
        return CONF.metrics.weight_multiplier

    def _weigh_object(self, host_state, weight_properties):
        value = 0.0

        for (name, ratio) in self.setting:
            try:
                value += host_state.metrics[name].value * ratio
            except KeyError:
                if CONF.metrics.required:
                    raise exception.ComputeHostMetricNotFound(
                        host=host_state.host,
                        node=host_state.nodename,
                        name=name)
                else:
                    # We treat the unavailable metric as the most negative
                    # factor, i.e. set the value so that this object ends up
                    # at the end of the ordered weighed object list.
                    # Do nothing if ratio or weight_multiplier is 0.
                    if ratio * self.weight_multiplier() != 0:
                        return CONF.metrics.weight_of_unavailable
        return value
nova-2014.1.5/nova/scheduler/weights/__init__.py0000664000567000056700000000263512540642543022566 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler host weights """ from oslo.config import cfg from nova import weights CONF = cfg.CONF class WeighedHost(weights.WeighedObject): def to_dict(self): x = dict(weight=self.weight) x['host'] = self.obj.host return x def __repr__(self): return "WeighedHost [host: %s, weight: %s]" % ( self.obj.host, self.weight) class BaseHostWeigher(weights.BaseWeigher): """Base class for host weights.""" pass class HostWeightHandler(weights.BaseWeightHandler): object_class = WeighedHost def __init__(self): super(HostWeightHandler, self).__init__(BaseHostWeigher) def all_weighers(): """Return a list of weight plugin classes found in this directory.""" return HostWeightHandler().get_all_classes() nova-2014.1.5/nova/scheduler/driver.py0000664000567000056700000001163512540642543020650 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Scheduler base class that all Schedulers should inherit from """ import sys from oslo.config import cfg from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api from nova import db from nova import exception from nova import notifications from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova import rpc from nova import servicegroup LOG = logging.getLogger(__name__) scheduler_driver_opts = [ cfg.StrOpt('scheduler_host_manager', default='nova.scheduler.host_manager.HostManager', help='The scheduler host manager class to use'), cfg.IntOpt('scheduler_max_attempts', default=3, help='Maximum number of attempts to schedule an instance'), ] CONF = cfg.CONF CONF.register_opts(scheduler_driver_opts) def handle_schedule_error(context, ex, instance_uuid, request_spec): """On run_instance failure, update instance state and send notifications. 
""" if not isinstance(ex, exception.NoValidHost): LOG.exception(_("Exception during scheduler.run_instance")) state = vm_states.ERROR.upper() LOG.warning(_('Setting instance to %s state.'), state, instance_uuid=instance_uuid) (old_ref, new_ref) = db.instance_update_and_get_original(context, instance_uuid, {'vm_state': vm_states.ERROR, 'task_state': None}) notifications.send_update(context, old_ref, new_ref, service="scheduler") compute_utils.add_instance_fault_from_exc(context, conductor_api.LocalAPI(), new_ref, ex, sys.exc_info()) properties = request_spec.get('instance_properties', {}) payload = dict(request_spec=request_spec, instance_properties=properties, instance_id=instance_uuid, state=vm_states.ERROR, method='run_instance', reason=ex) rpc.get_notifier('scheduler').error(context, 'scheduler.run_instance', payload) def instance_update_db(context, instance_uuid, extra_values=None): """Clear the host and node - set the scheduled_at field of an Instance. :returns: An Instance with the updated fields set properly. """ now = timeutils.utcnow() values = {'host': None, 'node': None, 'scheduled_at': now} if extra_values: values.update(extra_values) return db.instance_update(context, instance_uuid, values) class Scheduler(object): """The base class that all Scheduler classes should inherit from.""" def __init__(self): self.host_manager = importutils.import_object( CONF.scheduler_host_manager) self.servicegroup_api = servicegroup.API() def run_periodic_tasks(self, context): """Manager calls this so drivers can perform periodic tasks.""" pass def hosts_up(self, context, topic): """Return the list of hosts that have a running service for topic.""" services = db.service_get_all_by_topic(context, topic) return [service['host'] for service in services if self.servicegroup_api.service_is_up(service)] def schedule_run_instance(self, context, request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties, legacy_bdm_in_spec): """Must override schedule_run_instance method for scheduler to work.""" msg = _("Driver must implement schedule_run_instance") raise NotImplementedError(msg) def select_destinations(self, context, request_spec, filter_properties): """Must override select_destinations method. :return: A list of dicts with 'host', 'nodename' and 'limits' as keys that satisfies the request_spec and filter_properties. """ msg = _("Driver must implement select_destinations") raise NotImplementedError(msg) nova-2014.1.5/nova/scheduler/filter_scheduler.py0000664000567000056700000003663412540642543022706 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The FilterScheduler is for creating instances locally. You can customize this scheduler by specifying your own Host Filters and Weighing Functions. 
""" import random from oslo.config import cfg from nova.compute import rpcapi as compute_rpcapi from nova import exception from nova.objects import instance_group as instance_group_obj from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.pci import pci_request from nova import rpc from nova.scheduler import driver from nova.scheduler import scheduler_options from nova.scheduler import utils as scheduler_utils CONF = cfg.CONF LOG = logging.getLogger(__name__) filter_scheduler_opts = [ cfg.IntOpt('scheduler_host_subset_size', default=1, help='New instances will be scheduled on a host chosen ' 'randomly from a subset of the N best hosts. This ' 'property defines the subset size that a host is ' 'chosen from. A value of 1 chooses the ' 'first host returned by the weighing functions. ' 'This value must be at least 1. Any value less than 1 ' 'will be ignored, and 1 will be used instead') ] CONF.register_opts(filter_scheduler_opts) class FilterScheduler(driver.Scheduler): """Scheduler that can be used for filtering and weighing.""" def __init__(self, *args, **kwargs): super(FilterScheduler, self).__init__(*args, **kwargs) self.options = scheduler_options.SchedulerOptions() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.notifier = rpc.get_notifier('scheduler') def schedule_run_instance(self, context, request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties, legacy_bdm_in_spec): """This method is called from nova.compute.api to provision an instance. We first create a build plan (a list of WeightedHosts) and then provision. Returns a list of the instances created. """ payload = dict(request_spec=request_spec) self.notifier.info(context, 'scheduler.run_instance.start', payload) instance_uuids = request_spec.get('instance_uuids') LOG.info(_("Attempting to build %(num_instances)d instance(s) " "uuids: %(instance_uuids)s"), {'num_instances': len(instance_uuids), 'instance_uuids': instance_uuids}) LOG.debug(_("Request Spec: %s") % request_spec) weighed_hosts = self._schedule(context, request_spec, filter_properties, instance_uuids) # NOTE: Pop instance_uuids as individual creates do not need the # set of uuids. Do not pop before here as the upper exception # handler fo NoValidHost needs the uuid to set error state instance_uuids = request_spec.pop('instance_uuids') # NOTE(comstud): Make sure we do not pass this through. It # contains an instance of RpcContext that cannot be serialized. 
filter_properties.pop('context', None) for num, instance_uuid in enumerate(instance_uuids): request_spec['instance_properties']['launch_index'] = num try: try: weighed_host = weighed_hosts.pop(0) LOG.info(_("Choosing host %(weighed_host)s " "for instance %(instance_uuid)s"), {'weighed_host': weighed_host, 'instance_uuid': instance_uuid}) except IndexError: raise exception.NoValidHost(reason="") self._provision_resource(context, weighed_host, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, instance_uuid=instance_uuid, legacy_bdm_in_spec=legacy_bdm_in_spec) except Exception as ex: # NOTE(vish): we don't reraise the exception here to make sure # that all instances in the request get set to # error properly driver.handle_schedule_error(context, ex, instance_uuid, request_spec) # scrub retry host list in case we're scheduling multiple # instances: retry = filter_properties.get('retry', {}) retry['hosts'] = [] self.notifier.info(context, 'scheduler.run_instance.end', payload) def select_destinations(self, context, request_spec, filter_properties): """Selects a filtered set of hosts and nodes.""" num_instances = request_spec['num_instances'] instance_uuids = request_spec.get('instance_uuids') selected_hosts = self._schedule(context, request_spec, filter_properties, instance_uuids) # Couldn't fulfill the request_spec if len(selected_hosts) < num_instances: raise exception.NoValidHost(reason='') dests = [dict(host=host.obj.host, nodename=host.obj.nodename, limits=host.obj.limits) for host in selected_hosts] return dests def _provision_resource(self, context, weighed_host, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, instance_uuid=None, legacy_bdm_in_spec=True): """Create the requested resource in this Zone.""" # NOTE(vish): add our current instance back into the request spec request_spec['instance_uuids'] = [instance_uuid] payload = dict(request_spec=request_spec, weighted_host=weighed_host.to_dict(), instance_id=instance_uuid) self.notifier.info(context, 'scheduler.run_instance.scheduled', payload) # Update the metadata if necessary scheduler_hints = filter_properties.get('scheduler_hints') or {} try: updated_instance = driver.instance_update_db(context, instance_uuid) except exception.InstanceNotFound: LOG.warning(_("Instance disappeared during scheduling"), context=context, instance_uuid=instance_uuid) else: scheduler_utils.populate_filter_properties(filter_properties, weighed_host.obj) self.compute_rpcapi.run_instance(context, instance=updated_instance, host=weighed_host.obj.host, request_spec=request_spec, filter_properties=filter_properties, requested_networks=requested_networks, injected_files=injected_files, admin_password=admin_password, is_first_time=is_first_time, node=weighed_host.obj.nodename, legacy_bdm_in_spec=legacy_bdm_in_spec) def _get_configuration_options(self): """Fetch options dictionary. Broken out for testing.""" return self.options.get_configuration() def populate_filter_properties(self, request_spec, filter_properties): """Stuff things into filter_properties. Can be overridden in a subclass to add more data. 
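A minimal override sketch (the class and key names here are illustrative assumptions, not part of nova): class RackAwareScheduler(FilterScheduler): def populate_filter_properties(self, request_spec, filter_properties): super(RackAwareScheduler, self).populate_filter_properties( request_spec, filter_properties) # Expose an extra, deployment-specific key to filters/weighers. filter_properties['rack'] = request_spec.get('rack')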
""" # Save useful information from the request spec for filter processing: project_id = request_spec['instance_properties']['project_id'] os_type = request_spec['instance_properties']['os_type'] filter_properties['project_id'] = project_id filter_properties['os_type'] = os_type pci_requests = pci_request.get_pci_requests_from_flavor( request_spec.get('instance_type') or {}) if pci_requests: filter_properties['pci_requests'] = pci_requests def _max_attempts(self): max_attempts = CONF.scheduler_max_attempts if max_attempts < 1: raise exception.NovaException(_("Invalid value for " "'scheduler_max_attempts', must be >= 1")) return max_attempts def _log_compute_error(self, instance_uuid, retry): """If the request contained an exception from a previous compute build/resize operation, log it to aid debugging """ exc = retry.pop('exc', None) # string-ified exception from compute if not exc: return # no exception info from a previous attempt, skip hosts = retry.get('hosts', None) if not hosts: return # no previously attempted hosts, skip last_host, last_node = hosts[-1] LOG.error(_('Error from last host: %(last_host)s (node %(last_node)s):' ' %(exc)s'), {'last_host': last_host, 'last_node': last_node, 'exc': exc}, instance_uuid=instance_uuid) def _populate_retry(self, filter_properties, instance_properties): """Populate filter properties with history of retries for this request. If maximum retries is exceeded, raise NoValidHost. """ max_attempts = self._max_attempts() force_hosts = filter_properties.get('force_hosts', []) force_nodes = filter_properties.get('force_nodes', []) if max_attempts == 1 or force_hosts or force_nodes: # re-scheduling is disabled. return retry = filter_properties.pop('retry', {}) # retry is enabled, update attempt count: if retry: retry['num_attempts'] += 1 else: retry = { 'num_attempts': 1, 'hosts': [] # list of compute hosts tried } filter_properties['retry'] = retry instance_uuid = instance_properties.get('uuid') self._log_compute_error(instance_uuid, retry) if retry['num_attempts'] > max_attempts: msg = (_('Exceeded max scheduling attempts %(max_attempts)d for ' 'instance %(instance_uuid)s') % {'max_attempts': max_attempts, 'instance_uuid': instance_uuid}) raise exception.NoValidHost(reason=msg) @staticmethod def _setup_instance_group(context, filter_properties): update_group_hosts = False scheduler_hints = filter_properties.get('scheduler_hints') or {} group_hint = scheduler_hints.get('group', None) if group_hint: group = instance_group_obj.InstanceGroup.get_by_hint(context, group_hint) policies = set(('anti-affinity', 'affinity')) if any((policy in policies) for policy in group.policies): update_group_hosts = True filter_properties.setdefault('group_hosts', set()) user_hosts = set(filter_properties['group_hosts']) group_hosts = set(group.get_hosts(context)) filter_properties['group_hosts'] = user_hosts | group_hosts filter_properties['group_policies'] = group.policies return update_group_hosts def _schedule(self, context, request_spec, filter_properties, instance_uuids=None): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ elevated = context.elevated() instance_properties = request_spec['instance_properties'] instance_type = request_spec.get("instance_type", None) update_group_hosts = self._setup_instance_group(context, filter_properties) config_options = self._get_configuration_options() # check retry policy. Rather ugly use of instance_uuids[0]... # but if we've exceeded max retries... 
then we really only # have a single instance. properties = instance_properties.copy() if instance_uuids: properties['uuid'] = instance_uuids[0] self._populate_retry(filter_properties, properties) filter_properties.update({'context': context, 'request_spec': request_spec, 'config_options': config_options, 'instance_type': instance_type}) self.populate_filter_properties(request_spec, filter_properties) # Find our local list of acceptable hosts by repeatedly # filtering and weighing our options. Each time we choose a # host, we virtually consume resources on it so subsequent # selections can adjust accordingly. # Note: remember, we are using an iterator here. So only # traverse this list once. This can bite you if the hosts # are being scanned in a filter or weighing function. hosts = self._get_all_host_states(elevated) selected_hosts = [] if instance_uuids: num_instances = len(instance_uuids) else: num_instances = request_spec.get('num_instances', 1) for num in xrange(num_instances): # Filter local hosts based on requirements ... hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties, index=num) if not hosts: # Can't get any more locally. break LOG.debug(_("Filtered %(hosts)s"), {'hosts': hosts}) weighed_hosts = self.host_manager.get_weighed_hosts(hosts, filter_properties) LOG.debug(_("Weighed %(hosts)s"), {'hosts': weighed_hosts}) scheduler_host_subset_size = CONF.scheduler_host_subset_size if scheduler_host_subset_size > len(weighed_hosts): scheduler_host_subset_size = len(weighed_hosts) if scheduler_host_subset_size < 1: scheduler_host_subset_size = 1 chosen_host = random.choice( weighed_hosts[0:scheduler_host_subset_size]) selected_hosts.append(chosen_host) # Now consume the resources so the filter/weights # will change for the next instance. chosen_host.obj.consume_from_instance(instance_properties) if update_group_hosts is True: filter_properties['group_hosts'].add(chosen_host.obj.host) return selected_hosts def _get_all_host_states(self, context): """Template method, so a subclass can implement caching.""" return self.host_manager.get_all_host_states(context) nova-2014.1.5/nova/scheduler/manager.py0000664000567000056700000003524612540642543020773 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
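# Illustrative sketch (names assumed, not part of the original sources): the # FilterScheduler._schedule() loop above narrows hosts in two passes and then # picks randomly from the best N, roughly: # # hosts = self.host_manager.get_filtered_hosts(hosts, filter_properties) # weighed = self.host_manager.get_weighed_hosts(hosts, filter_properties) # subset = max(1, min(CONF.scheduler_host_subset_size, len(weighed))) # chosen = random.choice(weighed[:subset])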
""" Scheduler Service """ from oslo.config import cfg from oslo import messaging from nova.compute import rpcapi as compute_rpcapi from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api from nova.conductor.tasks import live_migrate from nova import exception from nova import manager from nova.objects import instance as instance_obj from nova.openstack.common import excutils from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import periodic_task from nova import quota from nova.scheduler import utils as scheduler_utils LOG = logging.getLogger(__name__) scheduler_driver_opts = [ cfg.StrOpt('scheduler_driver', default='nova.scheduler.filter_scheduler.FilterScheduler', help='Default driver to use for the scheduler'), cfg.IntOpt('scheduler_driver_task_period', default=60, help='How often (in seconds) to run periodic tasks in ' 'the scheduler driver of your choice. ' 'Please note this is likely to interact with the value ' 'of service_down_time, but exactly how they interact ' 'will depend on your choice of scheduler driver.'), ] CONF = cfg.CONF CONF.register_opts(scheduler_driver_opts) QUOTAS = quota.QUOTAS class SchedulerManager(manager.Manager): """Chooses a host to run instances on.""" target = messaging.Target(version='2.9') def __init__(self, scheduler_driver=None, *args, **kwargs): if not scheduler_driver: scheduler_driver = CONF.scheduler_driver self.driver = importutils.import_object(scheduler_driver) self.compute_rpcapi = compute_rpcapi.ComputeAPI() super(SchedulerManager, self).__init__(service_name='scheduler', *args, **kwargs) self.additional_endpoints.append(_SchedulerManagerV3Proxy(self)) def create_volume(self, context, volume_id, snapshot_id, reservations=None, image_id=None): #function removed in RPC API 2.3 pass @messaging.expected_exceptions(exception.NoValidHost, exception.ComputeServiceUnavailable, exception.InvalidHypervisorType, exception.UnableToMigrateToSelf, exception.DestinationHypervisorTooOld, exception.InvalidLocalStorage, exception.InvalidSharedStorage, exception.MigrationPreCheckError) def live_migration(self, context, instance, dest, block_migration, disk_over_commit): try: self._schedule_live_migration(context, instance, dest, block_migration, disk_over_commit) except (exception.NoValidHost, exception.ComputeServiceUnavailable, exception.InvalidHypervisorType, exception.UnableToMigrateToSelf, exception.DestinationHypervisorTooOld, exception.InvalidLocalStorage, exception.InvalidSharedStorage, exception.MigrationPreCheckError) as ex: request_spec = {'instance_properties': { 'uuid': instance['uuid'], }, } with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify('live_migration', dict(vm_state=instance['vm_state'], task_state=None, expected_task_state=task_states.MIGRATING,), context, ex, request_spec) except Exception as ex: request_spec = {'instance_properties': { 'uuid': instance['uuid'], }, } with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify('live_migration', {'vm_state': vm_states.ERROR}, context, ex, request_spec) def _schedule_live_migration(self, context, instance, dest, block_migration, disk_over_commit): task = live_migrate.LiveMigrationTask(context, instance, dest, block_migration, disk_over_commit) return task.execute() def run_instance(self, context, request_spec, admin_password, 
injected_files, requested_networks, is_first_time, filter_properties, legacy_bdm_in_spec=True): """Tries to call schedule_run_instance on the driver. Sets instance vm_state to ERROR on exceptions """ instance_uuids = request_spec['instance_uuids'] with compute_utils.EventReporter(context, conductor_api.LocalAPI(), 'schedule', *instance_uuids): try: return self.driver.schedule_run_instance(context, request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties, legacy_bdm_in_spec) except exception.NoValidHost as ex: # don't re-raise self._set_vm_state_and_notify('run_instance', {'vm_state': vm_states.ERROR, 'task_state': None}, context, ex, request_spec) except Exception as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify('run_instance', {'vm_state': vm_states.ERROR, 'task_state': None}, context, ex, request_spec) def prep_resize(self, context, image, request_spec, filter_properties, instance, instance_type, reservations): """Tries to call schedule_prep_resize on the driver. Sets instance vm_state to ACTIVE on NoHostFound Sets vm_state to ERROR on other exceptions """ instance_uuid = instance['uuid'] with compute_utils.EventReporter(context, conductor_api.LocalAPI(), 'schedule', instance_uuid): try: request_spec['num_instances'] = len( request_spec['instance_uuids']) hosts = self.driver.select_destinations( context, request_spec, filter_properties) host_state = hosts[0] scheduler_utils.populate_filter_properties(filter_properties, host_state) # context is not serializable filter_properties.pop('context', None) (host, node) = (host_state['host'], host_state['nodename']) attrs = ['metadata', 'system_metadata', 'info_cache', 'security_groups'] inst_obj = instance_obj.Instance._from_db_object( context, instance_obj.Instance(), instance, expected_attrs=attrs) self.compute_rpcapi.prep_resize( context, image, inst_obj, instance_type, host, reservations, request_spec=request_spec, filter_properties=filter_properties, node=node) except exception.NoValidHost as ex: vm_state = instance.get('vm_state', vm_states.ACTIVE) self._set_vm_state_and_notify('prep_resize', {'vm_state': vm_state, 'task_state': None}, context, ex, request_spec) if reservations: QUOTAS.rollback(context, reservations) except Exception as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify('prep_resize', {'vm_state': vm_states.ERROR, 'task_state': None}, context, ex, request_spec) if reservations: QUOTAS.rollback(context, reservations) def _set_vm_state_and_notify(self, method, updates, context, ex, request_spec): scheduler_utils.set_vm_state_and_notify( context, 'scheduler', method, updates, ex, request_spec, self.db) # NOTE(hanlind): This method can be removed in v3.0 of the RPC API. def show_host_resources(self, context, host): """Shows the physical/usage resource given by hosts. 
:param context: security context :param host: hostname :returns: example format is below:: {'resource':D, 'usage':{proj_id1:D, proj_id2:D}} D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048, 'vcpus_used': 12, 'memory_mb_used': 10240, 'local_gb_used': 64} """ # Getting compute node info and related instances info service_ref = self.db.service_get_by_compute_host(context, host) instance_refs = self.db.instance_get_all_by_host(context, service_ref['host']) # Getting total available/used resource compute_ref = service_ref['compute_node'][0] resource = {'vcpus': compute_ref['vcpus'], 'memory_mb': compute_ref['memory_mb'], 'local_gb': compute_ref['local_gb'], 'vcpus_used': compute_ref['vcpus_used'], 'memory_mb_used': compute_ref['memory_mb_used'], 'local_gb_used': compute_ref['local_gb_used']} usage = dict() if not instance_refs: return {'resource': resource, 'usage': usage} # Getting usage resource per project project_ids = [i['project_id'] for i in instance_refs] project_ids = list(set(project_ids)) for project_id in project_ids: vcpus = [i['vcpus'] for i in instance_refs if i['project_id'] == project_id] mem = [i['memory_mb'] for i in instance_refs if i['project_id'] == project_id] root = [i['root_gb'] for i in instance_refs if i['project_id'] == project_id] ephemeral = [i['ephemeral_gb'] for i in instance_refs if i['project_id'] == project_id] usage[project_id] = {'vcpus': sum(vcpus), 'memory_mb': sum(mem), 'root_gb': sum(root), 'ephemeral_gb': sum(ephemeral)} return {'resource': resource, 'usage': usage} @periodic_task.periodic_task def _expire_reservations(self, context): QUOTAS.expire(context) @periodic_task.periodic_task(spacing=CONF.scheduler_driver_task_period, run_immediately=True) def _run_periodic_tasks(self, context): self.driver.run_periodic_tasks(context) # NOTE(russellb) This method can be removed in 3.0 of this API. It is # deprecated in favor of the method in the base API. def get_backdoor_port(self, context): return self.backdoor_port # NOTE(hanlind): This method can be removed in v4.0 of the RPC API. @messaging.expected_exceptions(exception.NoValidHost) def select_hosts(self, context, request_spec, filter_properties): """Returns host(s) best suited for this request_spec and filter_properties. """ dests = self.driver.select_destinations(context, request_spec, filter_properties) hosts = [dest['host'] for dest in dests] return jsonutils.to_primitive(hosts) @messaging.expected_exceptions(exception.NoValidHost) def select_destinations(self, context, request_spec, filter_properties): """Returns destinations(s) best suited for this request_spec and filter_properties. The result should be a list of dicts with 'host', 'nodename' and 'limits' as keys. 
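For example, a successful two-instance request might return (values illustrative only): [{'host': 'compute-01', 'nodename': 'compute-01', 'limits': {'memory_mb': 12288.0}}, {'host': 'compute-02', 'nodename': 'compute-02', 'limits': {'memory_mb': 12288.0}}]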
""" dests = self.driver.select_destinations(context, request_spec, filter_properties) return jsonutils.to_primitive(dests) class _SchedulerManagerV3Proxy(object): target = messaging.Target(version='3.0') def __init__(self, manager): self.manager = manager def select_destinations(self, ctxt, request_spec, filter_properties): return self.manager.select_destinations(ctxt, request_spec=request_spec, filter_properties=filter_properties) def run_instance(self, ctxt, request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties, legacy_bdm_in_spec): return self.manager.run_instance(ctxt, request_spec=request_spec, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, is_first_time=is_first_time, filter_properties=filter_properties, legacy_bdm_in_spec=legacy_bdm_in_spec) def prep_resize(self, ctxt, instance, instance_type, image, request_spec, filter_properties, reservations): return self.manager.prep_resize(ctxt, instance=instance, instance_type=instance_type, image=image, request_spec=request_spec, filter_properties=filter_properties, reservations=reservations) nova-2014.1.5/nova/scheduler/filters/0000775000567000056700000000000012540643452020445 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/scheduler/filters/metrics_filter.py0000664000567000056700000000367312540642543024043 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Intel, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters from nova.scheduler import utils LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('weight_setting', 'nova.scheduler.weights.metrics', group='metrics') class MetricsFilter(filters.BaseHostFilter): """Metrics Filter This filter is used to filter out those hosts which don't have the corresponding metrics so these the metrics weigher won't fail due to these hosts. """ def __init__(self): super(MetricsFilter, self).__init__() opts = utils.parse_options(CONF.metrics.weight_setting, sep='=', converter=float, name="metrics.weight_setting") self.keys = [x[0] for x in opts] def host_passes(self, host_state, filter_properties): unavail = [i for i in self.keys if i not in host_state.metrics] if unavail: LOG.debug(_("%(host_state)s does not have the following " "metrics: %(metrics)s"), {'host_state': host_state, 'metrics': ', '.join(unavail)}) return len(unavail) == 0 nova-2014.1.5/nova/scheduler/filters/availability_zone_filter.py0000664000567000056700000000353212540642543026074 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova import db from nova.scheduler import filters CONF = cfg.CONF CONF.import_opt('default_availability_zone', 'nova.availability_zones') class AvailabilityZoneFilter(filters.BaseHostFilter): """Filters Hosts by availability zone. Works with aggregate metadata availability zones, using the key 'availability_zone' Note: in theory a compute node can be part of multiple availability_zones """ # Availability zones do not change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): spec = filter_properties.get('request_spec', {}) props = spec.get('instance_properties', {}) availability_zone = props.get('availability_zone') if availability_zone: context = filter_properties['context'].elevated() metadata = db.aggregate_metadata_get_by_host( context, host_state.host, key='availability_zone') if 'availability_zone' in metadata: return availability_zone in metadata['availability_zone'] else: return availability_zone == CONF.default_availability_zone return True nova-2014.1.5/nova/scheduler/filters/aggregate_multitenancy_isolation.py0000664000567000056700000000376312540642543027633 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova import db from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) class AggregateMultiTenancyIsolation(filters.BaseHostFilter): """Isolate tenants in specific aggregates.""" # Aggregate data and tenant do not change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): """If a host is in an aggregate that has the metadata key "filter_tenant_id" it can only create instances from that tenant(s). A host can be in different aggregates. If a host doesn't belong to an aggregate with the metadata key "filter_tenant_id" it can create instances from all tenants. 
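For example (identifiers illustrative), marking an aggregate with: nova aggregate-set-metadata agg1 filter_tenant_id=<tenant-uuid> restricts the hosts in agg1 to instances whose project_id equals that tenant's id, while hosts outside such aggregates remain open to all tenants.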
""" spec = filter_properties.get('request_spec', {}) props = spec.get('instance_properties', {}) tenant_id = props.get('project_id') context = filter_properties['context'].elevated() metadata = db.aggregate_metadata_get_by_host(context, host_state.host, key="filter_tenant_id") if metadata != {}: if tenant_id not in metadata["filter_tenant_id"]: LOG.debug(_("%s fails tenant id on aggregate"), host_state) return False return True nova-2014.1.5/nova/scheduler/filters/retry_filter.py0000664000567000056700000000335512540642543023537 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) class RetryFilter(filters.BaseHostFilter): """Filter out nodes that have already been attempted for scheduling purposes """ def host_passes(self, host_state, filter_properties): """Skip nodes that have already been attempted.""" retry = filter_properties.get('retry', None) if not retry: # Re-scheduling is disabled LOG.debug("Re-scheduling is disabled") return True hosts = retry.get('hosts', []) host = [host_state.host, host_state.nodename] passes = host not in hosts pass_msg = "passes" if passes else "fails" LOG.debug(_("Host %(host)s %(pass_msg)s. Previously tried hosts: " "%(hosts)s") % {'host': host, 'pass_msg': pass_msg, 'hosts': hosts}) # Host passes if it's not in the list of previously attempted hosts: return passes nova-2014.1.5/nova/scheduler/filters/aggregate_instance_extra_specs.py0000664000567000056700000000566712540642543027247 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # Copyright (c) 2012 Cloudscaling # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from nova import db from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters from nova.scheduler.filters import extra_specs_ops LOG = logging.getLogger(__name__) _SCOPE = 'aggregate_instance_extra_specs' class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter): """AggregateInstanceExtraSpecsFilter works with InstanceType records.""" # Aggregate data and instance type does not change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): """Return a list of hosts that can create instance_type Check that the extra specs associated with the instance type match the metadata provided by aggregates. If not present return False. """ instance_type = filter_properties.get('instance_type') if 'extra_specs' not in instance_type: return True context = filter_properties['context'].elevated() metadata = db.aggregate_metadata_get_by_host(context, host_state.host) for key, req in instance_type['extra_specs'].iteritems(): # Either not scope format, or aggregate_instance_extra_specs scope scope = key.split(':', 1) if len(scope) > 1: if scope[0] != _SCOPE: continue else: del scope[0] key = scope[0] aggregate_vals = metadata.get(key, None) if not aggregate_vals: LOG.debug(_("%(host_state)s fails instance_type extra_specs " "requirements. Extra_spec %(key)s is not in aggregate."), {'host_state': host_state, 'key': key}) return False for aggregate_val in aggregate_vals: if extra_specs_ops.match(aggregate_val, req): break else: LOG.debug(_("%(host_state)s fails instance_type extra_specs " "requirements. '%(aggregate_vals)s' do not " "match '%(req)s'"), {'host_state': host_state, 'req': req, 'aggregate_vals': aggregate_vals}) return False return True nova-2014.1.5/nova/scheduler/filters/compute_capabilities_filter.py0000664000567000056700000000601212540642543026550 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters from nova.scheduler.filters import extra_specs_ops LOG = logging.getLogger(__name__) class ComputeCapabilitiesFilter(filters.BaseHostFilter): """HostFilter hard-coded to work with InstanceType records.""" # Instance type and host capabilities do not change within a request run_filter_once_per_request = True def _satisfies_extra_specs(self, host_state, instance_type): """Check that the host_state provided by the compute service satisfy the extra specs associated with the instance type. 
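For example (values illustrative), a flavor defined with: extra_specs = {'capabilities:free_disk_mb': '>= 10240'} only passes hosts whose free_disk_mb capability satisfies the comparison; the operator grammar is implemented in extra_specs_ops.match().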
""" if 'extra_specs' not in instance_type: return True for key, req in instance_type['extra_specs'].iteritems(): # Either not scope format, or in capabilities scope scope = key.split(':') if len(scope) > 1: if scope[0] != "capabilities": continue else: del scope[0] cap = host_state for index in range(0, len(scope)): try: if not isinstance(cap, dict): if getattr(cap, scope[index], None) is None: # If can't find, check stats dict cap = cap.stats.get(scope[index], None) else: cap = getattr(cap, scope[index], None) else: cap = cap.get(scope[index], None) except AttributeError: return False if cap is None: return False if not extra_specs_ops.match(str(cap), req): LOG.debug(_("extra_spec requirement '%(req)s' does not match " "'%(cap)s'"), {'req': req, 'cap': cap}) return False return True def host_passes(self, host_state, filter_properties): """Return a list of hosts that can create instance_type.""" instance_type = filter_properties.get('instance_type') if not self._satisfies_extra_specs(host_state, instance_type): LOG.debug(_("%(host_state)s fails instance_type extra_specs " "requirements"), {'host_state': host_state}) return False return True nova-2014.1.5/nova/scheduler/filters/disk_filter.py0000664000567000056700000000444012540642543023320 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) disk_allocation_ratio_opt = cfg.FloatOpt("disk_allocation_ratio", default=1.0, help="Virtual disk to physical disk allocation ratio") CONF = cfg.CONF CONF.register_opt(disk_allocation_ratio_opt) class DiskFilter(filters.BaseHostFilter): """Disk Filter with over subscription flag.""" def host_passes(self, host_state, filter_properties): """Filter based on disk usage.""" instance_type = filter_properties.get('instance_type') requested_disk = (1024 * (instance_type['root_gb'] + instance_type['ephemeral_gb']) + instance_type['swap']) free_disk_mb = host_state.free_disk_mb total_usable_disk_mb = host_state.total_usable_disk_gb * 1024 disk_mb_limit = total_usable_disk_mb * CONF.disk_allocation_ratio used_disk_mb = total_usable_disk_mb - free_disk_mb usable_disk_mb = disk_mb_limit - used_disk_mb if not usable_disk_mb >= requested_disk: LOG.debug(_("%(host_state)s does not have %(requested_disk)s MB " "usable disk, it only has %(usable_disk_mb)s MB usable " "disk."), {'host_state': host_state, 'requested_disk': requested_disk, 'usable_disk_mb': usable_disk_mb}) return False disk_gb_limit = disk_mb_limit / 1024 host_state.limits['disk_gb'] = disk_gb_limit return True nova-2014.1.5/nova/scheduler/filters/affinity_filter.py0000664000567000056700000001375212540642543024205 0ustar jenkinsjenkins00000000000000# Copyright 2012, Piston Cloud Computing, Inc. # Copyright 2012, OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import netaddr import six from nova.compute import api as compute from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) class AffinityFilter(filters.BaseHostFilter): def __init__(self): self.compute_api = compute.API() class DifferentHostFilter(AffinityFilter): '''Schedule the instance on a different host from a set of instances.''' # The hosts the instances are running on don't change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): context = filter_properties['context'] scheduler_hints = filter_properties.get('scheduler_hints') or {} affinity_uuids = scheduler_hints.get('different_host', []) if isinstance(affinity_uuids, six.string_types): affinity_uuids = [affinity_uuids] if affinity_uuids: return not self.compute_api.get_all(context, {'host': host_state.host, 'uuid': affinity_uuids, 'deleted': False}) # With no different_host key return True class SameHostFilter(AffinityFilter): '''Schedule the instance on the same host as another instance in a set of instances. ''' # The hosts the instances are running on don't change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): context = filter_properties['context'] scheduler_hints = filter_properties.get('scheduler_hints') or {} affinity_uuids = scheduler_hints.get('same_host', []) if isinstance(affinity_uuids, six.string_types): affinity_uuids = [affinity_uuids] if affinity_uuids: return self.compute_api.get_all(context, {'host': host_state.host, 'uuid': affinity_uuids, 'deleted': False}) # With no same_host key return True class SimpleCIDRAffinityFilter(AffinityFilter): '''Schedule the instance on a host with a particular cidr ''' # The address of a host doesn't change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): scheduler_hints = filter_properties.get('scheduler_hints') or {} affinity_cidr = scheduler_hints.get('cidr', '/24') affinity_host_addr = scheduler_hints.get('build_near_host_ip') host_ip = host_state.host_ip if affinity_host_addr: affinity_net = netaddr.IPNetwork(str.join('', (affinity_host_addr, affinity_cidr))) return netaddr.IPAddress(host_ip) in affinity_net # We don't have an affinity host address. return True class _GroupAntiAffinityFilter(AffinityFilter): """Schedule the instance on a different host from a set of group hosts.
""" def __init__(self): super(_GroupAntiAffinityFilter, self).__init__() def host_passes(self, host_state, filter_properties): # Only invoke the filter is 'anti-affinity' is configured policies = filter_properties.get('group_policies', []) if self.policy_name not in policies: return True group_hosts = filter_properties.get('group_hosts') or [] LOG.debug(_("Group anti affinity: check if %(host)s not " "in %(configured)s"), {'host': host_state.host, 'configured': group_hosts}) if group_hosts: return not host_state.host in group_hosts # No groups configured return True class GroupAntiAffinityFilter(_GroupAntiAffinityFilter): def __init__(self): self.policy_name = 'legacy' super(GroupAntiAffinityFilter, self).__init__() class ServerGroupAntiAffinityFilter(_GroupAntiAffinityFilter): def __init__(self): self.policy_name = 'anti-affinity' super(ServerGroupAntiAffinityFilter, self).__init__() class _GroupAffinityFilter(AffinityFilter): """Schedule the instance on to host from a set of group hosts. """ def __init__(self): super(_GroupAffinityFilter, self).__init__() def host_passes(self, host_state, filter_properties): # Only invoke the filter is 'affinity' is configured policies = filter_properties.get('group_policies', []) if self.policy_name not in policies: return True group_hosts = filter_properties.get('group_hosts', []) LOG.debug(_("Group affinity: check if %(host)s in " "%(configured)s"), {'host': host_state.host, 'configured': group_hosts}) if group_hosts: return host_state.host in group_hosts # No groups configured return True class GroupAffinityFilter(_GroupAffinityFilter): def __init__(self): self.policy_name = 'legacy' super(GroupAffinityFilter, self).__init__() class ServerGroupAffinityFilter(_GroupAffinityFilter): def __init__(self): self.policy_name = 'affinity' super(ServerGroupAffinityFilter, self).__init__() nova-2014.1.5/nova/scheduler/filters/ram_filter.py0000664000567000056700000001014112540642543023140 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # Copyright (c) 2012 Cloudscaling # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova import db from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) ram_allocation_ratio_opt = cfg.FloatOpt('ram_allocation_ratio', default=1.5, help='Virtual ram to physical ram allocation ratio which affects ' 'all ram filters. This configuration specifies a global ratio ' 'for RamFilter. 
For AggregateRamFilter, it will fall back to ' 'this configuration value if no per-aggregate setting found.') CONF = cfg.CONF CONF.register_opt(ram_allocation_ratio_opt) class BaseRamFilter(filters.BaseHostFilter): def _get_ram_allocation_ratio(self, host_state, filter_properties): raise NotImplementedError def host_passes(self, host_state, filter_properties): """Only return hosts with sufficient available RAM.""" instance_type = filter_properties.get('instance_type') requested_ram = instance_type['memory_mb'] free_ram_mb = host_state.free_ram_mb total_usable_ram_mb = host_state.total_usable_ram_mb ram_allocation_ratio = self._get_ram_allocation_ratio(host_state, filter_properties) memory_mb_limit = total_usable_ram_mb * ram_allocation_ratio used_ram_mb = total_usable_ram_mb - free_ram_mb usable_ram = memory_mb_limit - used_ram_mb if not usable_ram >= requested_ram: LOG.debug(_("%(host_state)s does not have %(requested_ram)s MB " "usable ram, it only has %(usable_ram)s MB usable ram."), {'host_state': host_state, 'requested_ram': requested_ram, 'usable_ram': usable_ram}) return False # save oversubscription limit for compute node to test against: host_state.limits['memory_mb'] = memory_mb_limit return True class RamFilter(BaseRamFilter): """Ram Filter with over subscription flag.""" def _get_ram_allocation_ratio(self, host_state, filter_properties): return CONF.ram_allocation_ratio class AggregateRamFilter(BaseRamFilter): """AggregateRamFilter with per-aggregate ram subscription flag. Fall back to global ram_allocation_ratio if no per-aggregate setting found. """ def _get_ram_allocation_ratio(self, host_state, filter_properties): context = filter_properties['context'].elevated() # TODO(uni): DB query in filter is a performance hit, especially for # system with lots of hosts. Will need a general solution here to fix # all filters with aggregate DB call things. metadata = db.aggregate_metadata_get_by_host( context, host_state.host, key='ram_allocation_ratio') aggregate_vals = metadata.get('ram_allocation_ratio', set()) num_values = len(aggregate_vals) if num_values == 0: return CONF.ram_allocation_ratio if num_values > 1: LOG.warning(_("%(num_values)d ratio values found, " "of which the minimum value will be used."), {'num_values': num_values}) try: ratio = float(min(aggregate_vals)) except ValueError as e: LOG.warning(_("Could not decode ram_allocation_ratio: '%s'"), e) ratio = CONF.ram_allocation_ratio return ratio nova-2014.1.5/nova/scheduler/filters/__init__.py0000664000567000056700000000310112540642532022547 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
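# Illustrative arithmetic (numbers assumed) for BaseRamFilter.host_passes() # above, with ram_allocation_ratio = 1.5: # # total_usable_ram_mb = 8192, free_ram_mb = 2048 # memory_mb_limit = 8192 * 1.5 = 12288 # used_ram_mb = 8192 - 2048 = 6144 # usable_ram = 12288 - 6144 = 6144 -> a 4096 MB flavor passes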
""" Scheduler host filters """ from nova import filters class BaseHostFilter(filters.BaseFilter): """Base class for host filters.""" def _filter_one(self, obj, filter_properties): """Return True if the object passes the filter, otherwise False.""" return self.host_passes(obj, filter_properties) def host_passes(self, host_state, filter_properties): """Return True if the HostState passes the filter, otherwise False. Override this in a subclass. """ raise NotImplementedError() class HostFilterHandler(filters.BaseFilterHandler): def __init__(self): super(HostFilterHandler, self).__init__(BaseHostFilter) def all_filters(): """Return a list of filter classes found in this directory. This method is used as the default for available scheduler filters and should return a list of all filter classes available. """ return HostFilterHandler().get_all_classes() nova-2014.1.5/nova/scheduler/filters/trusted_filter.py0000664000567000056700000002230112540642543024054 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Intel, Inc. # Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Filter to add support for Trusted Computing Pools. Filter that only schedules tasks on a host if the integrity (trust) of that host matches the trust requested in the `extra_specs' for the flavor. The `extra_specs' will contain a key/value pair where the key is `trust'. The value of this pair (`trusted'/`untrusted') must match the integrity of that host (obtained from the Attestation service) before the task can be scheduled on that host. Note that the parameters to control access to the Attestation Service are in the `nova.conf' file in a separate `trust' section. For example, the config file will look something like: [DEFAULT] verbose=True ... [trust] server=attester.mynetwork.com Details on the specific parameters can be found in the file `trust_attest.py'. 
Details on setting up and using an Attestation Service can be found at the Open Attestation project at: https://github.com/OpenAttestation/OpenAttestation """ from oslo.config import cfg import requests from nova import context from nova import db from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.scheduler import filters LOG = logging.getLogger(__name__) trusted_opts = [ cfg.StrOpt('attestation_server', help='Attestation server HTTP'), cfg.StrOpt('attestation_server_ca_file', help='Attestation server Cert file for Identity verification'), cfg.StrOpt('attestation_port', default='8443', help='Attestation server port'), cfg.StrOpt('attestation_api_url', default='/OpenAttestationWebServices/V1.0', help='Attestation web API URL'), cfg.StrOpt('attestation_auth_blob', help='Attestation authorization blob - must change'), cfg.IntOpt('attestation_auth_timeout', default=60, help='Attestation status cache valid period length'), cfg.BoolOpt('attestation_insecure_ssl', default=True, help='Disable SSL cert verification for Attestation service') ] CONF = cfg.CONF trust_group = cfg.OptGroup(name='trusted_computing', title='Trust parameters') CONF.register_group(trust_group) CONF.register_opts(trusted_opts, group=trust_group) class AttestationService(object): # Provide access wrapper to attestation server to get integrity report. def __init__(self): self.api_url = CONF.trusted_computing.attestation_api_url self.host = CONF.trusted_computing.attestation_server self.port = CONF.trusted_computing.attestation_port self.auth_blob = CONF.trusted_computing.attestation_auth_blob self.key_file = None self.cert_file = None self.ca_file = CONF.trusted_computing.attestation_server_ca_file self.request_count = 100 # If the CA file is not provided, let's check the cert if verification # asked self.verify = (not CONF.trusted_computing.attestation_insecure_ssl and self.ca_file or True) self.cert = (self.cert_file, self.key_file) def _do_request(self, method, action_url, body, headers): # Connects to the server and issues a request. # :returns: result data # :raises: IOError if the request fails action_url = "https://%s:%s%s/%s" % (self.host, self.port, self.api_url, action_url) try: res = requests.request(method, action_url, data=body, headers=headers, cert=self.cert, verify=self.verify) status_code = res.status_code # pylint: disable=E1101 if status_code in (requests.codes.OK, requests.codes.CREATED, requests.codes.ACCEPTED, requests.codes.NO_CONTENT): try: return requests.codes.OK, jsonutils.loads(res.text) except (TypeError, ValueError): return requests.codes.OK, res.text return status_code, None except requests.exceptions.RequestException: return IOError, None def _request(self, cmd, subcmd, hosts): body = {} body['count'] = len(hosts) body['hosts'] = hosts cooked = jsonutils.dumps(body) headers = {} headers['content-type'] = 'application/json' headers['Accept'] = 'application/json' if self.auth_blob: headers['x-auth-blob'] = self.auth_blob status, res = self._do_request(cmd, subcmd, cooked, headers) return status, res def do_attestation(self, hosts): """Attests compute nodes through OAT service. 
        :param hosts: hosts list to be attested
        :returns: dictionary for trust level and validate time
        """
        result = None

        status, data = self._request("POST", "PollHosts", hosts)
        if data is not None:
            result = data.get('hosts')

        return result


class ComputeAttestationCache(object):
    """Cache for compute node attestation

    Cache each compute node's trust level for some time; if the cache
    is out of date, poll the OAT service to refresh it.

    The OAT service may have a cache of its own. The OAT service's cache
    validity period should be set shorter than the trusted filter's.
    """

    def __init__(self):
        self.attestservice = AttestationService()
        self.compute_nodes = {}
        admin = context.get_admin_context()

        # Fetch compute node list to initialize the compute_nodes,
        # so that we don't need to poll OAT service one by one for each
        # host in the first round that scheduler invokes us.
        computes = db.compute_node_get_all(admin)
        for compute in computes:
            host = compute['hypervisor_hostname']
            self._init_cache_entry(host)

    def _cache_valid(self, host):
        cachevalid = False
        if host in self.compute_nodes:
            node_stats = self.compute_nodes.get(host)
            if not timeutils.is_older_than(
                    node_stats['vtime'],
                    CONF.trusted_computing.attestation_auth_timeout):
                cachevalid = True
        return cachevalid

    def _init_cache_entry(self, host):
        self.compute_nodes[host] = {
            'trust_lvl': 'unknown',
            'vtime': timeutils.normalize_time(
                timeutils.parse_isotime("1970-01-01T00:00:00Z"))}

    def _invalidate_caches(self):
        for host in self.compute_nodes:
            self._init_cache_entry(host)

    def _update_cache_entry(self, state):
        entry = {}

        host = state['host_name']
        entry['trust_lvl'] = state['trust_lvl']

        try:
            # Normalize as naive object to interoperate with utcnow().
            entry['vtime'] = timeutils.normalize_time(
                timeutils.parse_isotime(state['vtime']))
        except ValueError:
            # Mark the system as un-trusted if we get an invalid vtime.
            entry['trust_lvl'] = 'unknown'
            entry['vtime'] = timeutils.utcnow()

        self.compute_nodes[host] = entry

    def _update_cache(self):
        self._invalidate_caches()
        states = self.attestservice.do_attestation(self.compute_nodes.keys())
        if states is None:
            return
        for state in states:
            self._update_cache_entry(state)

    def get_host_attestation(self, host):
        """Check host's trust level."""
        if host not in self.compute_nodes:
            self._init_cache_entry(host)
        if not self._cache_valid(host):
            self._update_cache()
        level = self.compute_nodes.get(host).get('trust_lvl')
        return level


class ComputeAttestation(object):
    def __init__(self):
        self.caches = ComputeAttestationCache()

    def is_trusted(self, host, trust):
        level = self.caches.get_host_attestation(host)
        return trust == level


class TrustedFilter(filters.BaseHostFilter):
    """Trusted filter to support Trusted Compute Pools."""

    def __init__(self):
        self.compute_attestation = ComputeAttestation()

    def host_passes(self, host_state, filter_properties):
        instance = filter_properties.get('instance_type', {})
        extra = instance.get('extra_specs', {})
        trust = extra.get('trust:trusted_host')
        host = host_state.nodename
        if trust:
            return self.compute_attestation.is_trusted(host, trust)
        return True
nova-2014.1.5/nova/scheduler/filters/compute_filter.py0000664000567000056700000000341712540642543024045 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import servicegroup

CONF = cfg.CONF

LOG = logging.getLogger(__name__)


class ComputeFilter(filters.BaseHostFilter):
    """Filter on active Compute nodes."""

    def __init__(self):
        self.servicegroup_api = servicegroup.API()

    # Host state does not change within a request
    run_filter_once_per_request = True

    def host_passes(self, host_state, filter_properties):
        """Returns True for only active compute nodes."""
        service = host_state.service
        if service['disabled']:
            LOG.debug(_("%(host_state)s is disabled, reason: %(reason)s"),
                      {'host_state': host_state,
                       'reason': service.get('disabled_reason')})
            return False
        else:
            if not self.servicegroup_api.service_is_up(service):
                LOG.warn(_("%(host_state)s has not been heard from in a "
                           "while"), {'host_state': host_state})
                return False
        return True
nova-2014.1.5/nova/scheduler/filters/extra_specs_ops.py0000664000567000056700000000405512540642543024224 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import operator

# 1. The following operations are supported:
#    =, s==, s!=, s>=, s>, s<=, s<, <in>, <or>, ==, !=, >=, <=
# 2. Note that <or> is handled in a different way below.
# 3. If the first word in the extra_specs is not one of the operators,
#    it is ignored.
# 4. For example (illustrative): match("5", ">= 3") and
#    match("2", "<or> 1 <or> 2") are True; match("fred", "s== bob") is False.
_op_methods = {'=': lambda x, y: float(x) >= float(y),
               '<in>': lambda x, y: y in x,
               '==': lambda x, y: float(x) == float(y),
               '!=': lambda x, y: float(x) != float(y),
               '>=': lambda x, y: float(x) >= float(y),
               '<=': lambda x, y: float(x) <= float(y),
               's==': operator.eq,
               's!=': operator.ne,
               's<': operator.lt,
               's<=': operator.le,
               's>': operator.gt,
               's>=': operator.ge}


def match(value, req):
    words = req.split()

    op = method = None
    if words:
        op = words.pop(0)
        method = _op_methods.get(op)

    if op != '<or>' and not method:
        return value == req

    if value is None:
        return False

    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
        while True:
            if words.pop(0) == value:
                return True
            if not words:
                break
            op = words.pop(0)  # remove a keyword <or>
            if not words:
                break
        return False

    if words and method(value, words[0]):
        return True

    return False
nova-2014.1.5/nova/scheduler/filters/image_props_filter.py0000664000567000056700000001173012540642543024673 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation
# Copyright (c) 2012 Canonical Ltd
# Copyright (c) 2012 SUSE LINUX Products GmbH
# All Rights Reserved.
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from distutils import versionpredicate from nova.compute import vm_mode from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters from nova import utils LOG = logging.getLogger(__name__) class ImagePropertiesFilter(filters.BaseHostFilter): """Filter compute nodes that satisfy instance image properties. The ImagePropertiesFilter filters compute nodes that satisfy any architecture, hypervisor type, or virtual machine mode properties specified on the instance's image properties. Image properties are contained in the image dictionary in the request_spec. """ # Image Properties and Compute Capabilities do not change within # a request run_filter_once_per_request = True def _instance_supported(self, host_state, image_props, hypervisor_version): img_arch = image_props.get('architecture', None) img_h_type = image_props.get('hypervisor_type', None) img_vm_mode = image_props.get('vm_mode', None) img_vm_mode = vm_mode.name(img_vm_mode) # get canonical name checked_img_props = (img_arch, img_h_type, img_vm_mode) # Supported if no compute-related instance properties are specified if not any(checked_img_props): return True supp_instances = host_state.supported_instances # Not supported if an instance property is requested but nothing # advertised by the host. if not supp_instances: LOG.debug(_("Instance contains properties %(image_props)s, " "but no corresponding supported_instances are " "advertised by the compute node"), {'image_props': image_props}) return False def _compare_props(props, other_props): for i in props: if i and i not in other_props: return False return True def _compare_product_version(hyper_version, image_props): version_required = image_props.get('hypervisor_version_requires') if not(hypervisor_version and version_required): return True img_prop_predicate = versionpredicate.VersionPredicate( 'image_prop (%s)' % version_required) hyper_ver_str = utils.convert_version_to_str(hyper_version) return img_prop_predicate.satisfied_by(hyper_ver_str) for supp_inst in supp_instances: if _compare_props(checked_img_props, supp_inst): if _compare_product_version(hypervisor_version, image_props): LOG.debug(_("Instance properties %(image_props)s " "are satisfied by compute host hypervisor " "version %(hypervisor_version)s and " "supported instances %(supp_instances)s"), {'image_props': image_props, 'supp_instances': supp_instances, 'hypervisor_version': hypervisor_version}) return True LOG.debug(_("Instance contains properties %(image_props)s " "that are not provided by the compute node " "supported_instances %(supp_instances)s or " "hypervisor version %(hypervisor_version)s do not match"), {'image_props': image_props, 'supp_instances': supp_instances, 'hypervisor_version': hypervisor_version}) return False def host_passes(self, host_state, filter_properties): """Check if host passes specified image properties. 
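
        The image properties consulted are 'architecture',
        'hypervisor_type', 'vm_mode' and, when the host reports a
        hypervisor version, 'hypervisor_version_requires'.  They can be
        set with, for example (image name and values are illustrative):

            glance image-update my-image --property hypervisor_type=qemu \
                --property architecture=x86_64
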
Returns True for compute nodes that satisfy image properties contained in the request_spec. """ spec = filter_properties.get('request_spec', {}) image_props = spec.get('image', {}).get('properties', {}) if not self._instance_supported(host_state, image_props, host_state.hypervisor_version): LOG.debug(_("%(host_state)s does not support requested " "instance_properties"), {'host_state': host_state}) return False return True nova-2014.1.5/nova/scheduler/filters/num_instances_filter.py0000664000567000056700000000317212540642543025235 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) max_instances_per_host_opt = cfg.IntOpt("max_instances_per_host", default=50, help="Ignore hosts that have too many instances") CONF = cfg.CONF CONF.register_opt(max_instances_per_host_opt) class NumInstancesFilter(filters.BaseHostFilter): """Filter out hosts with too many instances.""" def host_passes(self, host_state, filter_properties): num_instances = host_state.num_instances max_instances = CONF.max_instances_per_host passes = num_instances < max_instances if not passes: LOG.debug(_("%(host_state)s fails num_instances check: Max " "instances per host is set to %(max_instances)s"), {'host_state': host_state, 'max_instances': max_instances}) return passes nova-2014.1.5/nova/scheduler/filters/json_filter.py0000664000567000056700000001130112540642543023331 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator import six from nova.openstack.common import jsonutils from nova.scheduler import filters class JsonFilter(filters.BaseHostFilter): """Host Filter to allow simple JSON-based grammar for selecting hosts. """ def _op_compare(self, args, op): """Returns True if the specified operator can successfully compare the first item in the args with all the rest. Will return False if only one item is in the list. 
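
        For example, with op=operator.lt, args=[5, 7, 9] yields True
        because 5 < 7 and 5 < 9.  A full query handled by this filter
        looks like (an illustrative scheduler hint):

            ["and", [">=", "$free_ram_mb", 1024],
                    [">=", "$free_disk_mb", 204800]]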
""" if len(args) < 2: return False if op is operator.contains: bad = args[0] not in args[1:] else: bad = [arg for arg in args[1:] if not op(args[0], arg)] return not bool(bad) def _equals(self, args): """First term is == all the other terms.""" return self._op_compare(args, operator.eq) def _less_than(self, args): """First term is < all the other terms.""" return self._op_compare(args, operator.lt) def _greater_than(self, args): """First term is > all the other terms.""" return self._op_compare(args, operator.gt) def _in(self, args): """First term is in set of remaining terms.""" return self._op_compare(args, operator.contains) def _less_than_equal(self, args): """First term is <= all the other terms.""" return self._op_compare(args, operator.le) def _greater_than_equal(self, args): """First term is >= all the other terms.""" return self._op_compare(args, operator.ge) def _not(self, args): """Flip each of the arguments.""" return [not arg for arg in args] def _or(self, args): """True if any arg is True.""" return any(args) def _and(self, args): """True if all args are True.""" return all(args) commands = { '=': _equals, '<': _less_than, '>': _greater_than, 'in': _in, '<=': _less_than_equal, '>=': _greater_than_equal, 'not': _not, 'or': _or, 'and': _and, } def _parse_string(self, string, host_state): """Strings prefixed with $ are capability lookups in the form '$variable' where 'variable' is an attribute in the HostState class. If $variable is a dictionary, you may use: $variable.dictkey """ if not string: return None if not string.startswith("$"): return string path = string[1:].split(".") obj = getattr(host_state, path[0], None) if obj is None: return None for item in path[1:]: obj = obj.get(item, None) if obj is None: return None return obj def _process_filter(self, query, host_state): """Recursively parse the query structure.""" if not query: return True cmd = query[0] method = self.commands[cmd] cooked_args = [] for arg in query[1:]: if isinstance(arg, list): arg = self._process_filter(arg, host_state) elif isinstance(arg, six.string_types): arg = self._parse_string(arg, host_state) if arg is not None: cooked_args.append(arg) result = method(self, cooked_args) return result def host_passes(self, host_state, filter_properties): """Return a list of hosts that can fulfill the requirements specified in the query. """ try: query = filter_properties['scheduler_hints']['query'] except KeyError: query = None if not query: return True # NOTE(comstud): Not checking capabilities or service for # enabled/disabled so that a provided json filter can decide result = self._process_filter(jsonutils.loads(query), host_state) if isinstance(result, list): # If any succeeded, include the host result = any(result) if result: # Filter it out. return True return False nova-2014.1.5/nova/scheduler/filters/aggregate_image_properties_isolation.py0000664000567000056700000000527512540642543030455 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Cloudwatt # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from nova import db
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.scheduler import filters

opts = [
    cfg.StrOpt('aggregate_image_properties_isolation_namespace',
               help='Force the filter to consider only keys matching '
                    'the given namespace.'),
    cfg.StrOpt('aggregate_image_properties_isolation_separator',
               default=".",
               help='The separator used between the namespace and keys'),
]
CONF = cfg.CONF
CONF.register_opts(opts)

LOG = logging.getLogger(__name__)


class AggregateImagePropertiesIsolation(filters.BaseHostFilter):
    """AggregateImagePropertiesIsolation works with image properties."""

    # Aggregate data and instance type does not change within a request
    run_filter_once_per_request = True

    def host_passes(self, host_state, filter_properties):
        """Checks that the host's aggregate metadata key/value pairs
        match the image properties of the request.
        """
        cfg_namespace = CONF.aggregate_image_properties_isolation_namespace
        cfg_separator = CONF.aggregate_image_properties_isolation_separator

        spec = filter_properties.get('request_spec', {})
        image_props = spec.get('image', {}).get('properties', {})
        context = filter_properties['context'].elevated()
        metadata = db.aggregate_metadata_get_by_host(context,
                                                     host_state.host)

        for key, options in metadata.iteritems():
            if (cfg_namespace and
                    not key.startswith(cfg_namespace + cfg_separator)):
                continue
            prop = image_props.get(key)
            if prop and prop not in options:
                LOG.debug(_("%(host_state)s fails image aggregate properties "
                            "requirements. Property %(prop)s does not "
                            "match %(options)s."),
                          {'host_state': host_state,
                           'prop': prop,
                           'options': options})
                return False
        return True
nova-2014.1.5/nova/scheduler/filters/type_filter.py0000664000567000056700000000436512540642543023355 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 The Cloudscaling Group, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova import db
from nova.scheduler import filters


class TypeAffinityFilter(filters.BaseHostFilter):
    """TypeAffinityFilter doesn't allow more than one VM type per host.

    Note: this works best with ram_weight_multiplier
    (spread) set to 1 (default).
    """

    def host_passes(self, host_state, filter_properties):
        """Dynamically limits hosts to one instance type

        Return False if host has any instance types other than the
        requested type.  Return True if all instance types match or if
        host is empty.
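
        For example (illustrative flavor names), a host running only
        m1.small instances passes a request for another m1.small but
        fails a request for an m1.large.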
""" instance_type = filter_properties.get('instance_type') context = filter_properties['context'].elevated() instances_other_type = db.instance_get_all_by_host_and_not_type( context, host_state.host, instance_type['id']) return len(instances_other_type) == 0 class AggregateTypeAffinityFilter(filters.BaseHostFilter): """AggregateTypeAffinityFilter limits instance_type by aggregate return True if no instance_type key is set or if the aggregate metadata key 'instance_type' has the instance_type name as a value """ # Aggregate data does not change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): instance_type = filter_properties.get('instance_type') context = filter_properties['context'].elevated() metadata = db.aggregate_metadata_get_by_host( context, host_state.host, key='instance_type') return (len(metadata) == 0 or instance_type['name'] in metadata['instance_type']) nova-2014.1.5/nova/scheduler/filters/isolated_hosts_filter.py0000664000567000056700000000637112540642543025417 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova.scheduler import filters isolated_opts = [ cfg.ListOpt('isolated_images', default=[], help='Images to run on isolated host'), cfg.ListOpt('isolated_hosts', default=[], help='Host reserved for specific images'), cfg.BoolOpt('restrict_isolated_hosts_to_isolated_images', default=True, help='Whether to force isolated hosts to run only isolated ' 'images'), ] CONF = cfg.CONF CONF.register_opts(isolated_opts) class IsolatedHostsFilter(filters.BaseHostFilter): """Keep specified images to selected hosts.""" # The configuration values do not change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): """Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set to True: | isolated_image | non_isolated_image -------------+----------------+------------------- iso_host | True | False non_iso_host | False | True Result Matrix with 'restrict_isolated_hosts_to_isolated_images' set to False: | isolated_image | non_isolated_image -------------+----------------+------------------- iso_host | True | True non_iso_host | False | True """ # If the configuration does not list any hosts, the filter will always # return True, assuming a configuration error, so letting all hosts # through. isolated_hosts = CONF.isolated_hosts isolated_images = CONF.isolated_images restrict_isolated_hosts_to_isolated_images = (CONF. restrict_isolated_hosts_to_isolated_images) if not isolated_images: # As there are no images to match, return True if the filter is # not restrictive otherwise return False if the host is in the # isolation list. 
return ((not restrict_isolated_hosts_to_isolated_images) or (host_state.host not in isolated_hosts)) spec = filter_properties.get('request_spec', {}) props = spec.get('instance_properties', {}) image_ref = props.get('image_ref') image_isolated = image_ref in isolated_images host_isolated = host_state.host in isolated_hosts if restrict_isolated_hosts_to_isolated_images: return (image_isolated == host_isolated) else: return (not image_isolated) or host_isolated nova-2014.1.5/nova/scheduler/filters/core_filter.py0000664000567000056700000000764512540642543023330 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # Copyright (c) 2012 Justin Santa Barbara # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova import db from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) cpu_allocation_ratio_opt = cfg.FloatOpt('cpu_allocation_ratio', default=16.0, help='Virtual CPU to physical CPU allocation ratio which affects ' 'all CPU filters. This configuration specifies a global ratio ' 'for CoreFilter. For AggregateCoreFilter, it will fall back to ' 'this configuration value if no per-aggregate setting found.') CONF = cfg.CONF CONF.register_opt(cpu_allocation_ratio_opt) class BaseCoreFilter(filters.BaseHostFilter): def _get_cpu_allocation_ratio(self, host_state, filter_properties): raise NotImplementedError def host_passes(self, host_state, filter_properties): """Return True if host has sufficient CPU cores.""" instance_type = filter_properties.get('instance_type') if not instance_type: return True if not host_state.vcpus_total: # Fail safe LOG.warning(_("VCPUs not set; assuming CPU collection broken")) return True instance_vcpus = instance_type['vcpus'] cpu_allocation_ratio = self._get_cpu_allocation_ratio(host_state, filter_properties) vcpus_total = host_state.vcpus_total * cpu_allocation_ratio # Only provide a VCPU limit to compute if the virt driver is reporting # an accurate count of installed VCPUs. (XenServer driver does not) if vcpus_total > 0: host_state.limits['vcpu'] = vcpus_total return (vcpus_total - host_state.vcpus_used) >= instance_vcpus class CoreFilter(BaseCoreFilter): """CoreFilter filters based on CPU core utilization.""" def _get_cpu_allocation_ratio(self, host_state, filter_properties): return CONF.cpu_allocation_ratio class AggregateCoreFilter(BaseCoreFilter): """AggregateCoreFilter with per-aggregate CPU subscription flag. Fall back to global cpu_allocation_ratio if no per-aggregate setting found. """ def _get_cpu_allocation_ratio(self, host_state, filter_properties): context = filter_properties['context'].elevated() # TODO(uni): DB query in filter is a performance hit, especially for # system with lots of hosts. Will need a general solution here to fix # all filters with aggregate DB call things. 
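        # A per-aggregate ratio would typically be set via aggregate
        # metadata, e.g. (illustrative aggregate name and value):
        #     nova aggregate-set-metadata agg1 cpu_allocation_ratio=4.0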
metadata = db.aggregate_metadata_get_by_host( context, host_state.host, key='cpu_allocation_ratio') aggregate_vals = metadata.get('cpu_allocation_ratio', set()) num_values = len(aggregate_vals) if num_values == 0: return CONF.cpu_allocation_ratio if num_values > 1: LOG.warning(_("%(num_values)d ratio values found, " "of which the minimum value will be used."), {'num_values': num_values}) try: ratio = float(min(aggregate_vals)) except ValueError as e: LOG.warning(_("Could not decode cpu_allocation_ratio: '%s'"), e) ratio = CONF.cpu_allocation_ratio return ratio nova-2014.1.5/nova/scheduler/filters/pci_passthrough_filter.py0000664000567000056700000000312612540642543025570 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 ISP RAS. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.scheduler import filters class PciPassthroughFilter(filters.BaseHostFilter): """Pci Passthrough Filter based on PCI request Filter that schedules instances on a host if the host has devices to meet the device requests in the 'extra_specs' for the flavor. PCI resource tracker provides updated summary information about the PCI devices for each host, like: [{"count": 5, "vendor_id": "8086", "product_id": "1520", "extra_info":'{}'}], and VM requests PCI devices via PCI requests, like: [{"count": 1, "vendor_id": "8086", "product_id": "1520",}]. The filter checks if the host passes or not based on this information. """ def host_passes(self, host_state, filter_properties): """Return true if the host has the required PCI devices.""" if not filter_properties.get('pci_requests'): return True return host_state.pci_stats.support_requests( filter_properties.get('pci_requests')) nova-2014.1.5/nova/scheduler/filters/all_hosts_filter.py0000664000567000056700000000166312540642532024360 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.scheduler import filters class AllHostsFilter(filters.BaseHostFilter): """NOOP host filter. Returns all hosts.""" # list of hosts doesn't change within a request run_filter_once_per_request = True def host_passes(self, host_state, filter_properties): return True nova-2014.1.5/nova/scheduler/filters/io_ops_filter.py0000664000567000056700000000337412540642543023663 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.scheduler import filters LOG = logging.getLogger(__name__) max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host", default=8, help="Ignore hosts that have too many builds/resizes/snaps/migrations") CONF = cfg.CONF CONF.register_opt(max_io_ops_per_host_opt) class IoOpsFilter(filters.BaseHostFilter): """Filter out hosts with too many concurrent I/O operations.""" def host_passes(self, host_state, filter_properties): """Use information about current vm and task states collected from compute node statistics to decide whether to filter. """ num_io_ops = host_state.num_io_ops max_io_ops = CONF.max_io_ops_per_host passes = num_io_ops < max_io_ops if not passes: LOG.debug(_("%(host_state)s fails I/O ops check: Max IOs per host " "is set to %(max_io_ops)s"), {'host_state': host_state, 'max_io_ops': max_io_ops}) return passes nova-2014.1.5/nova/scheduler/__init__.py0000664000567000056700000000174412540642543021114 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ :mod:`nova.scheduler` -- Scheduler Nodes ===================================================== .. automodule:: nova.scheduler :platform: Unix :synopsis: Module that picks a compute node to run a VM instance. .. moduleauthor:: Sandy Walsh .. moduleauthor:: Ed Leafe .. moduleauthor:: Chris Behrens """ nova-2014.1.5/nova/scheduler/rpcapi.py0000664000567000056700000001217612540642543020634 0ustar jenkinsjenkins00000000000000# Copyright 2013, Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the scheduler manager RPC API. 
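
Messages are sent over the topic given by the 'scheduler_topic' option
(default 'scheduler'), and can be pinned to an older interface during
rolling upgrades via the '[upgrade_levels]' section, e.g.
(illustrative):

    [upgrade_levels]
    scheduler = havana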
""" from oslo.config import cfg from oslo import messaging from nova.objects import base as objects_base from nova.openstack.common import jsonutils from nova import rpc rpcapi_opts = [ cfg.StrOpt('scheduler_topic', default='scheduler', help='The topic scheduler nodes listen on'), ] CONF = cfg.CONF CONF.register_opts(rpcapi_opts) rpcapi_cap_opt = cfg.StrOpt('scheduler', help='Set a version cap for messages sent to scheduler services') CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') class SchedulerAPI(object): '''Client side of the scheduler rpc API. API version history: 1.0 - Initial version. 1.1 - Changes to prep_resize(): - remove instance_uuid, add instance - remove instance_type_id, add instance_type - remove topic, it was unused 1.2 - Remove topic from run_instance, it was unused 1.3 - Remove instance_id, add instance to live_migration 1.4 - Remove update_db from prep_resize 1.5 - Add reservations argument to prep_resize() 1.6 - Remove reservations argument to run_instance() 1.7 - Add create_volume() method, remove topic from live_migration() 2.0 - Remove 1.x backwards compat 2.1 - Add image_id to create_volume() 2.2 - Remove reservations argument to create_volume() 2.3 - Remove create_volume() 2.4 - Change update_service_capabilities() - accepts a list of capabilities 2.5 - Add get_backdoor_port() 2.6 - Add select_hosts() ... Grizzly supports message version 2.6. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.6. 2.7 - Add select_destinations() 2.8 - Deprecate prep_resize() -- JUST KIDDING. It is still used by the compute manager for retries. 2.9 - Added the legacy_bdm_in_spec parameter to run_instance() ... Havana supports message version 2.9. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.9. ... - Deprecated live_migration() call, moved to conductor ... 
- Deprecated select_hosts()

        3.0 - Removed backwards compat
    '''

    VERSION_ALIASES = {
        'grizzly': '2.6',
        'havana': '2.9',
        'icehouse': '3.0',
    }

    def __init__(self):
        super(SchedulerAPI, self).__init__()
        target = messaging.Target(topic=CONF.scheduler_topic, version='3.0')
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.scheduler,
                                               CONF.upgrade_levels.scheduler)
        serializer = objects_base.NovaObjectSerializer()
        self.client = rpc.get_client(target, version_cap=version_cap,
                                     serializer=serializer)

    def select_destinations(self, ctxt, request_spec, filter_properties):
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'select_destinations',
                          request_spec=request_spec,
                          filter_properties=filter_properties)

    def run_instance(self, ctxt, request_spec, admin_password,
                     injected_files, requested_networks, is_first_time,
                     filter_properties, legacy_bdm_in_spec=True):
        msg_kwargs = {'request_spec': request_spec,
                      'admin_password': admin_password,
                      'injected_files': injected_files,
                      'requested_networks': requested_networks,
                      'is_first_time': is_first_time,
                      'filter_properties': filter_properties,
                      'legacy_bdm_in_spec': legacy_bdm_in_spec}
        cctxt = self.client.prepare()
        cctxt.cast(ctxt, 'run_instance', **msg_kwargs)

    def prep_resize(self, ctxt, instance, instance_type, image,
                    request_spec, filter_properties, reservations):
        instance_p = jsonutils.to_primitive(instance)
        instance_type_p = jsonutils.to_primitive(instance_type)
        reservations_p = jsonutils.to_primitive(reservations)
        image_p = jsonutils.to_primitive(image)
        cctxt = self.client.prepare()
        cctxt.cast(ctxt, 'prep_resize',
                   instance=instance_p, instance_type=instance_type_p,
                   image=image_p, request_spec=request_spec,
                   filter_properties=filter_properties,
                   reservations=reservations_p)
nova-2014.1.5/nova/scheduler/baremetal_host_manager.py0000664000567000056700000000467212540642543024033 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Manage hosts in the current zone.
"""

from nova.scheduler import host_manager


class BaremetalNodeState(host_manager.HostState):
    """Mutable and immutable information tracked for a host.
    This is an attempt to remove the ad-hoc data structures
    previously used and lock down access.
    """
    def update_from_compute_node(self, compute):
        """Update information about a host from its compute_node info."""
        all_ram_mb = compute['memory_mb']
        free_disk_mb = compute['free_disk_gb'] * 1024
        free_ram_mb = compute['free_ram_mb']

        self.free_ram_mb = free_ram_mb
        self.total_usable_ram_mb = all_ram_mb
        self.free_disk_mb = free_disk_mb
        self.vcpus_total = compute['vcpus']
        self.vcpus_used = compute['vcpus_used']

    def consume_from_instance(self, instance):
        self.free_ram_mb = 0
        self.free_disk_mb = 0
        self.vcpus_used = self.vcpus_total


def new_host_state(self, host, node, capabilities=None, service=None):
    """Returns an instance of BaremetalNodeState or HostState according to
    capabilities.

    If 'baremetal_driver' is in capabilities, it returns an instance of
    BaremetalNodeState. If not, returns an instance of HostState.
    """
    if capabilities is None:
        capabilities = {}
    cap = capabilities.get('compute', {})
    if bool(cap.get('baremetal_driver')):
        return BaremetalNodeState(host, node, capabilities, service)
    else:
        return host_manager.HostState(host, node, capabilities, service)


class BaremetalHostManager(host_manager.HostManager):
    """Bare-Metal HostManager class."""

    # Override.
    # Yes, this is not a class, and it is OK
    host_state_cls = new_host_state

    def __init__(self):
        super(BaremetalHostManager, self).__init__()
nova-2014.1.5/nova/scheduler/caching_scheduler.py0000664000567000056700000000641312540642532023003 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from nova.scheduler import filter_scheduler


class CachingScheduler(filter_scheduler.FilterScheduler):
    """Scheduler to test aggressive caching of the host list.

    Please note, this is a very opinionated scheduler. Be sure to
    review the caveats listed here before selecting this scheduler.

    The aim of this scheduler is to reduce server build times when
    you have large bursts of server builds, by reducing the time it
    takes, from the user's point of view, to service each schedule
    request.

    There are two main parts to scheduling a user's request:
    * getting the current state of the system
    * using filters and weights to pick the best host

    This scheduler tries its best to cache in memory the current
    state of the system, so we don't need to make the expensive call
    to get the current state of the system while processing a user's
    request; we can do that query in a periodic task before the user
    even issues their request.

    To reduce races, cached info of the chosen host is updated using
    the existing host state call: consume_from_instance.

    Please note, the way this works, each scheduler worker has its own
    copy of the cache. So if you run multiple schedulers, you will get
    more retries, because the data stored on any additional scheduler
    will be more out of date than if it was fetched from the database.

    In a similar way, if you have a high number of server deletes, the
    extra capacity from those deletes will not show up until the cache
    is refreshed.
    """

    def __init__(self, *args, **kwargs):
        super(CachingScheduler, self).__init__(*args, **kwargs)
        self.all_host_states = None

    def run_periodic_tasks(self, context):
        """Called from a periodic task in the manager."""
        elevated = context.elevated()
        # NOTE(johngarbutt) Fetching the list of hosts before we get
        # a user request, so no user requests have to wait while we
        # fetch the list of hosts.
        self.all_host_states = self._get_up_hosts(elevated)

    def _get_all_host_states(self, context):
        """Called from the filter scheduler, in a template pattern."""
        if self.all_host_states is None:
            # NOTE(johngarbutt) We only get here when a scheduler request
            # comes in before the first run of the periodic task.
# Rather than raise an error, we fetch the list of hosts. self.all_host_states = self._get_up_hosts(context) return self.all_host_states def _get_up_hosts(self, context): all_hosts_iterator = self.host_manager.get_all_host_states(context) return list(all_hosts_iterator) nova-2014.1.5/nova/scheduler/host_manager.py0000664000567000056700000004544712540642543022034 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Manage hosts in the current zone. """ import collections import UserDict from oslo.config import cfg from nova.compute import task_states from nova.compute import vm_states from nova import db from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.pci import pci_request from nova.pci import pci_stats from nova.scheduler import filters from nova.scheduler import weights host_manager_opts = [ cfg.MultiStrOpt('scheduler_available_filters', default=['nova.scheduler.filters.all_filters'], help='Filter classes available to the scheduler which may ' 'be specified more than once. An entry of ' '"nova.scheduler.filters.standard_filters" ' 'maps to all filters included with nova.'), cfg.ListOpt('scheduler_default_filters', default=[ 'RetryFilter', 'AvailabilityZoneFilter', 'RamFilter', 'ComputeFilter', 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter', 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter', ], help='Which filter class names to use for filtering hosts ' 'when not specified in the request.'), cfg.ListOpt('scheduler_weight_classes', default=['nova.scheduler.weights.all_weighers'], help='Which weight class names to use for weighing hosts'), ] CONF = cfg.CONF CONF.register_opts(host_manager_opts) LOG = logging.getLogger(__name__) class ReadOnlyDict(UserDict.IterableUserDict): """A read-only dict.""" def __init__(self, source=None): self.data = {} self.update(source) def __setitem__(self, key, item): raise TypeError() def __delitem__(self, key): raise TypeError() def clear(self): raise TypeError() def pop(self, key, *args): raise TypeError() def popitem(self): raise TypeError() def update(self, source=None): if source is None: return elif isinstance(source, UserDict.UserDict): self.data = source.data elif isinstance(source, type({})): self.data = source else: raise TypeError() # Representation of a single metric value from a compute node. MetricItem = collections.namedtuple( 'MetricItem', ['value', 'timestamp', 'source']) class HostState(object): """Mutable and immutable information tracked for a host. This is an attempt to remove the ad-hoc data structures previously used and lock down access. """ def __init__(self, host, node, capabilities=None, service=None): self.host = host self.nodename = node self.update_capabilities(capabilities, service) # Mutable available resources. 
# These will change as resources are virtually "consumed". self.total_usable_ram_mb = 0 self.total_usable_disk_gb = 0 self.disk_mb_used = 0 self.free_ram_mb = 0 self.free_disk_mb = 0 self.vcpus_total = 0 self.vcpus_used = 0 # Additional host information from the compute node stats: self.vm_states = {} self.task_states = {} self.num_instances = 0 self.num_instances_by_project = {} self.num_instances_by_os_type = {} self.num_io_ops = 0 # Other information self.host_ip = None self.hypervisor_type = None self.hypervisor_version = None self.hypervisor_hostname = None self.cpu_info = None self.supported_instances = None # Resource oversubscription values for the compute host: self.limits = {} # Generic metrics from compute nodes self.metrics = {} self.updated = None def update_capabilities(self, capabilities=None, service=None): # Read-only capability dicts if capabilities is None: capabilities = {} self.capabilities = ReadOnlyDict(capabilities) if service is None: service = {} self.service = ReadOnlyDict(service) def _update_metrics_from_compute_node(self, compute): #NOTE(llu): The 'or []' is to avoid json decode failure of None # returned from compute.get, because DB schema allows # NULL in the metrics column metrics = compute.get('metrics', []) or [] if metrics: metrics = jsonutils.loads(metrics) for metric in metrics: # 'name', 'value', 'timestamp' and 'source' are all required # to be valid keys, just let KeyError happen if any one of # them is missing. But we also require 'name' to be True. name = metric['name'] item = MetricItem(value=metric['value'], timestamp=metric['timestamp'], source=metric['source']) if name: self.metrics[name] = item else: LOG.warn(_("Metric name unknown of %r") % item) def update_from_compute_node(self, compute): """Update information about a host from its compute_node info.""" if (self.updated and compute['updated_at'] and self.updated > compute['updated_at']): return all_ram_mb = compute['memory_mb'] # Assume virtual size is all consumed by instances if use qcow2 disk. free_gb = compute['free_disk_gb'] least_gb = compute.get('disk_available_least') if least_gb is not None: if least_gb > free_gb: # can occur when an instance in database is not on host LOG.warn(_("Host has more disk space than database expected" " (%(physical)sgb > %(database)sgb)") % {'physical': least_gb, 'database': free_gb}) free_gb = min(least_gb, free_gb) free_disk_mb = free_gb * 1024 self.disk_mb_used = compute['local_gb_used'] * 1024 #NOTE(jogo) free_ram_mb can be negative self.free_ram_mb = compute['free_ram_mb'] self.total_usable_ram_mb = all_ram_mb self.total_usable_disk_gb = compute['local_gb'] self.free_disk_mb = free_disk_mb self.vcpus_total = compute['vcpus'] self.vcpus_used = compute['vcpus_used'] self.updated = compute['updated_at'] if 'pci_stats' in compute: self.pci_stats = pci_stats.PciDeviceStats(compute['pci_stats']) else: self.pci_stats = None # All virt drivers report host_ip self.host_ip = compute['host_ip'] self.hypervisor_type = compute.get('hypervisor_type') self.hypervisor_version = compute.get('hypervisor_version') self.hypervisor_hostname = compute.get('hypervisor_hostname') self.cpu_info = compute.get('cpu_info') if compute.get('supported_instances'): self.supported_instances = jsonutils.loads( compute.get('supported_instances')) # Don't store stats directly in host_state to make sure these don't # overwrite any values, or get overwritten themselves. Store in self so # filters can schedule with them. 
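        # An illustrative stats blob, with key names taken from the
        # parsing below and made-up values:
        #     {"num_instances": "3", "num_proj_<project-id>": "2",
        #      "num_vm_building": "1", "num_task_spawning": "1",
        #      "num_os_type_linux": "3", "io_workload": "1"}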
stats = compute.get('stats', None) or '{}' self.stats = jsonutils.loads(stats) self.hypervisor_version = compute['hypervisor_version'] # Track number of instances on host self.num_instances = int(self.stats.get('num_instances', 0)) # Track number of instances by project_id project_id_keys = [k for k in self.stats.keys() if k.startswith("num_proj_")] for key in project_id_keys: project_id = key[9:] self.num_instances_by_project[project_id] = int(self.stats[key]) # Track number of instances in certain vm_states vm_state_keys = [k for k in self.stats.keys() if k.startswith("num_vm_")] for key in vm_state_keys: vm_state = key[7:] self.vm_states[vm_state] = int(self.stats[key]) # Track number of instances in certain task_states task_state_keys = [k for k in self.stats.keys() if k.startswith("num_task_")] for key in task_state_keys: task_state = key[9:] self.task_states[task_state] = int(self.stats[key]) # Track number of instances by host_type os_keys = [k for k in self.stats.keys() if k.startswith("num_os_type_")] for key in os_keys: os = key[12:] self.num_instances_by_os_type[os] = int(self.stats[key]) self.num_io_ops = int(self.stats.get('io_workload', 0)) # update metrics self._update_metrics_from_compute_node(compute) def consume_from_instance(self, instance): """Incrementally update host state from an instance.""" disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024 ram_mb = instance['memory_mb'] vcpus = instance['vcpus'] self.free_ram_mb -= ram_mb self.free_disk_mb -= disk_mb self.vcpus_used += vcpus self.updated = timeutils.utcnow() # Track number of instances on host self.num_instances += 1 # Track number of instances by project_id project_id = instance.get('project_id') if project_id not in self.num_instances_by_project: self.num_instances_by_project[project_id] = 0 self.num_instances_by_project[project_id] += 1 # Track number of instances in certain vm_states vm_state = instance.get('vm_state', vm_states.BUILDING) if vm_state not in self.vm_states: self.vm_states[vm_state] = 0 self.vm_states[vm_state] += 1 # Track number of instances in certain task_states task_state = instance.get('task_state') if task_state not in self.task_states: self.task_states[task_state] = 0 self.task_states[task_state] += 1 # Track number of instances by host_type os_type = instance.get('os_type') if os_type not in self.num_instances_by_os_type: self.num_instances_by_os_type[os_type] = 0 self.num_instances_by_os_type[os_type] += 1 pci_requests = pci_request.get_instance_pci_requests(instance) if pci_requests and self.pci_stats: self.pci_stats.apply_requests(pci_requests) vm_state = instance.get('vm_state', vm_states.BUILDING) task_state = instance.get('task_state') if vm_state == vm_states.BUILDING or task_state in [ task_states.RESIZE_MIGRATING, task_states.REBUILDING, task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT, task_states.IMAGE_BACKUP]: self.num_io_ops += 1 def __repr__(self): return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s" % (self.host, self.nodename, self.free_ram_mb, self.free_disk_mb, self.num_io_ops, self.num_instances)) class HostManager(object): """Base HostManager class.""" # Can be overridden in a subclass host_state_cls = HostState def __init__(self): # { (host, hypervisor_hostname) : { : { cap k : v }}} self.service_states = {} self.host_state_map = {} self.filter_handler = filters.HostFilterHandler() self.filter_classes = self.filter_handler.get_matching_classes( CONF.scheduler_available_filters) self.weight_handler = weights.HostWeightHandler() 
self.weight_classes = self.weight_handler.get_matching_classes( CONF.scheduler_weight_classes) def _choose_host_filters(self, filter_cls_names): """Since the caller may specify which filters to use we need to have an authoritative list of what is permissible. This function checks the filter names against a predefined set of acceptable filters. """ if filter_cls_names is None: filter_cls_names = CONF.scheduler_default_filters if not isinstance(filter_cls_names, (list, tuple)): filter_cls_names = [filter_cls_names] cls_map = dict((cls.__name__, cls) for cls in self.filter_classes) good_filters = [] bad_filters = [] for filter_name in filter_cls_names: if filter_name not in cls_map: bad_filters.append(filter_name) continue good_filters.append(cls_map[filter_name]) if bad_filters: msg = ", ".join(bad_filters) raise exception.SchedulerHostFilterNotFound(filter_name=msg) return good_filters def get_filtered_hosts(self, hosts, filter_properties, filter_class_names=None, index=0): """Filter hosts and return only ones passing all filters.""" def _strip_ignore_hosts(host_map, hosts_to_ignore): ignored_hosts = [] for host in hosts_to_ignore: for (hostname, nodename) in host_map.keys(): if host == hostname: del host_map[(hostname, nodename)] ignored_hosts.append(host) ignored_hosts_str = ', '.join(ignored_hosts) msg = _('Host filter ignoring hosts: %s') LOG.audit(msg % ignored_hosts_str) def _match_forced_hosts(host_map, hosts_to_force): forced_hosts = [] for (hostname, nodename) in host_map.keys(): if hostname not in hosts_to_force: del host_map[(hostname, nodename)] else: forced_hosts.append(hostname) if host_map: forced_hosts_str = ', '.join(forced_hosts) msg = _('Host filter forcing available hosts to %s') else: forced_hosts_str = ', '.join(hosts_to_force) msg = _("No hosts matched due to not matching " "'force_hosts' value of '%s'") LOG.audit(msg % forced_hosts_str) def _match_forced_nodes(host_map, nodes_to_force): forced_nodes = [] for (hostname, nodename) in host_map.keys(): if nodename not in nodes_to_force: del host_map[(hostname, nodename)] else: forced_nodes.append(nodename) if host_map: forced_nodes_str = ', '.join(forced_nodes) msg = _('Host filter forcing available nodes to %s') else: forced_nodes_str = ', '.join(nodes_to_force) msg = _("No nodes matched due to not matching " "'force_nodes' value of '%s'") LOG.audit(msg % forced_nodes_str) filter_classes = self._choose_host_filters(filter_class_names) ignore_hosts = filter_properties.get('ignore_hosts', []) force_hosts = filter_properties.get('force_hosts', []) force_nodes = filter_properties.get('force_nodes', []) if ignore_hosts or force_hosts or force_nodes: # NOTE(deva): we can't assume "host" is unique because # one host may have many nodes. 
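            # (A single bare-metal service host, for instance, can expose
            # many (host, nodename) pairs, one per managed node.)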
name_to_cls_map = dict([((x.host, x.nodename), x) for x in hosts]) if ignore_hosts: _strip_ignore_hosts(name_to_cls_map, ignore_hosts) if not name_to_cls_map: return [] # NOTE(deva): allow force_hosts and force_nodes independently if force_hosts: _match_forced_hosts(name_to_cls_map, force_hosts) if force_nodes: _match_forced_nodes(name_to_cls_map, force_nodes) if force_hosts or force_nodes: # NOTE(deva): Skip filters when forcing host or node if name_to_cls_map: return name_to_cls_map.values() hosts = name_to_cls_map.itervalues() return self.filter_handler.get_filtered_objects(filter_classes, hosts, filter_properties, index) def get_weighed_hosts(self, hosts, weight_properties): """Weigh the hosts.""" return self.weight_handler.get_weighed_objects(self.weight_classes, hosts, weight_properties) def get_all_host_states(self, context): """Returns a list of HostStates that represents all the hosts the HostManager knows about. Also, each of the consumable resources in HostState are pre-populated and adjusted based on data in the db. """ # Get resource usage across the available compute nodes: compute_nodes = db.compute_node_get_all(context) seen_nodes = set() for compute in compute_nodes: service = compute['service'] if not service: LOG.warn(_("No service for compute ID %s") % compute['id']) continue host = service['host'] node = compute.get('hypervisor_hostname') state_key = (host, node) capabilities = self.service_states.get(state_key, None) host_state = self.host_state_map.get(state_key) if host_state: host_state.update_capabilities(capabilities, dict(service.iteritems())) else: host_state = self.host_state_cls(host, node, capabilities=capabilities, service=dict(service.iteritems())) self.host_state_map[state_key] = host_state host_state.update_from_compute_node(compute) seen_nodes.add(state_key) # remove compute nodes from host_state_map if they are not active dead_nodes = set(self.host_state_map.keys()) - seen_nodes for state_key in dead_nodes: host, node = state_key LOG.info(_("Removing dead compute node %(host)s:%(node)s " "from scheduler") % {'host': host, 'node': node}) del self.host_state_map[state_key] return self.host_state_map.itervalues() nova-2014.1.5/nova/scheduler/chance.py0000664000567000056700000001077212540642543020577 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" Chance (Random) Scheduler implementation """ import random from oslo.config import cfg from nova.compute import rpcapi as compute_rpcapi from nova import exception from nova.openstack.common.gettextutils import _ from nova.scheduler import driver CONF = cfg.CONF CONF.import_opt('compute_topic', 'nova.compute.rpcapi') class ChanceScheduler(driver.Scheduler): """Implements Scheduler as a random node selector.""" def __init__(self, *args, **kwargs): super(ChanceScheduler, self).__init__(*args, **kwargs) self.compute_rpcapi = compute_rpcapi.ComputeAPI() def _filter_hosts(self, request_spec, hosts, filter_properties): """Filter a list of hosts based on request_spec.""" ignore_hosts = filter_properties.get('ignore_hosts', []) hosts = [host for host in hosts if host not in ignore_hosts] return hosts def _schedule(self, context, topic, request_spec, filter_properties): """Picks a host that is up at random.""" elevated = context.elevated() hosts = self.hosts_up(elevated, topic) if not hosts: msg = _("Is the appropriate service running?") raise exception.NoValidHost(reason=msg) hosts = self._filter_hosts(request_spec, hosts, filter_properties) if not hosts: msg = _("Could not find another compute") raise exception.NoValidHost(reason=msg) return random.choice(hosts) def select_destinations(self, context, request_spec, filter_properties): """Selects random destinations.""" num_instances = request_spec['num_instances'] # NOTE(timello): Returns a list of dicts with 'host', 'nodename' and # 'limits' as keys for compatibility with filter_scheduler. dests = [] for i in range(num_instances): host = self._schedule(context, CONF.compute_topic, request_spec, filter_properties) host_state = dict(host=host, nodename=None, limits=None) dests.append(host_state) if len(dests) < num_instances: raise exception.NoValidHost(reason='') return dests def schedule_run_instance(self, context, request_spec, admin_password, injected_files, requested_networks, is_first_time, filter_properties, legacy_bdm_in_spec): """Create and run an instance or instances.""" instance_uuids = request_spec.get('instance_uuids') for num, instance_uuid in enumerate(instance_uuids): request_spec['instance_properties']['launch_index'] = num try: host = self._schedule(context, CONF.compute_topic, request_spec, filter_properties) updated_instance = driver.instance_update_db(context, instance_uuid) self.compute_rpcapi.run_instance(context, instance=updated_instance, host=host, requested_networks=requested_networks, injected_files=injected_files, admin_password=admin_password, is_first_time=is_first_time, request_spec=request_spec, filter_properties=filter_properties, legacy_bdm_in_spec=legacy_bdm_in_spec) except Exception as ex: # NOTE(vish): we don't reraise the exception here to make sure # that all instances in the request get set to # error properly driver.handle_schedule_error(context, ex, instance_uuid, request_spec) nova-2014.1.5/nova/scheduler/utils.py0000664000567000056700000001467312540642543020522 0ustar jenkinsjenkins00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for scheduling.""" import sys from nova.compute import flavors from nova.compute import utils as compute_utils from nova import db from nova import notifications from nova.objects import base as obj_base from nova.objects import instance as instance_obj from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import rpc LOG = logging.getLogger(__name__) def build_request_spec(ctxt, image, instances, instance_type=None): """Build a request_spec for the scheduler. The request_spec assumes that all instances to be scheduled are the same type. """ instance = instances[0] if isinstance(instance, instance_obj.Instance): instance = obj_base.obj_to_primitive(instance) if instance_type is None: instance_type = flavors.extract_flavor(instance) # NOTE(comstud): This is a bit ugly, but will get cleaned up when # we're passing an InstanceType internal object. extra_specs = db.flavor_extra_specs_get(ctxt, instance_type['flavorid']) instance_type['extra_specs'] = extra_specs request_spec = { 'image': image or {}, 'instance_properties': instance, 'instance_type': instance_type, 'num_instances': len(instances), # NOTE(alaski): This should be removed as logic moves from the # scheduler to conductor. Provides backwards compatibility now. 'instance_uuids': [inst['uuid'] for inst in instances]} return jsonutils.to_primitive(request_spec) def set_vm_state_and_notify(context, service, method, updates, ex, request_spec, db): """changes VM state and notifies.""" LOG.warning(_("Failed to %(service)s_%(method)s: %(ex)s"), {'service': service, 'method': method, 'ex': ex}) vm_state = updates['vm_state'] properties = request_spec.get('instance_properties', {}) # NOTE(vish): We shouldn't get here unless we have a catastrophic # failure, so just set all instances to error. if uuid # is not set, instance_uuids will be set to [None], this # is solely to preserve existing behavior and can # be removed along with the 'if instance_uuid:' if we can # verify that uuid is always set. uuids = [properties.get('uuid')] from nova.conductor import api as conductor_api conductor = conductor_api.LocalAPI() notifier = rpc.get_notifier(service) for instance_uuid in request_spec.get('instance_uuids') or uuids: if instance_uuid: state = vm_state.upper() LOG.warning(_('Setting instance to %s state.'), state, instance_uuid=instance_uuid) # update instance state and notify on the transition (old_ref, new_ref) = db.instance_update_and_get_original( context, instance_uuid, updates) notifications.send_update(context, old_ref, new_ref, service=service) compute_utils.add_instance_fault_from_exc(context, conductor, new_ref, ex, sys.exc_info()) payload = dict(request_spec=request_spec, instance_properties=properties, instance_id=instance_uuid, state=vm_state, method=method, reason=ex) event_type = '%s.%s' % (service, method) notifier.error(context, event_type, payload) def populate_filter_properties(filter_properties, host_state): """Add additional information to the filter properties after a node has been selected by the scheduling process. 
""" if isinstance(host_state, dict): host = host_state['host'] nodename = host_state['nodename'] limits = host_state['limits'] else: host = host_state.host nodename = host_state.nodename limits = host_state.limits # Adds a retry entry for the selected compute host and node: _add_retry_host(filter_properties, host, nodename) # Adds oversubscription policy if not filter_properties.get('force_hosts'): filter_properties['limits'] = limits def _add_retry_host(filter_properties, host, node): """Add a retry entry for the selected compute node. In the event that the request gets re-scheduled, this entry will signal that the given node has already been tried. """ retry = filter_properties.get('retry', None) force_hosts = filter_properties.get('force_hosts', []) force_nodes = filter_properties.get('force_nodes', []) if not retry or force_hosts or force_nodes: return hosts = retry['hosts'] hosts.append([host, node]) def parse_options(opts, sep='=', converter=str, name=""): """Parse a list of options, each in the format of . Also use the converter to convert the value into desired type. :params opts: list of options, e.g. from oslo.config.cfg.ListOpt :params sep: the separator :params converter: callable object to convert the value, should raise ValueError for conversion failure :params name: name of the option :returns: a lists of tuple of values (key, converted_value) """ good = [] bad = [] for opt in opts: try: key, seen_sep, value = opt.partition(sep) value = converter(value) except ValueError: key = None value = None if key and seen_sep and value is not None: good.append((key, value)) else: bad.append(opt) if bad: LOG.warn(_("Ignoring the invalid elements of the option " "%(name)s: %(options)s"), {'name': name, 'options': ", ".join(bad)}) return good nova-2014.1.5/nova/servicegroup/0000775000567000056700000000000012540643452017534 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/servicegroup/__init__.py0000664000567000056700000000144712540642532021651 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # Copyright (c) AT&T Labs Inc. 2012 Yun Mao # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ The membership service for Nova. Different implementations can be plugged according to the Nova configuration. """ from nova.servicegroup import api API = api.API nova-2014.1.5/nova/servicegroup/api.py0000664000567000056700000001531212540642543020661 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # Copyright (c) AT&T Labs Inc. 2012 Yun Mao # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Define APIs for the servicegroup access.""" import random from oslo.config import cfg from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import utils LOG = logging.getLogger(__name__) _default_driver = 'db' servicegroup_driver_opt = cfg.StrOpt('servicegroup_driver', default=_default_driver, help='The driver for servicegroup ' 'service (valid options are: ' 'db, zk, mc)') CONF = cfg.CONF CONF.register_opt(servicegroup_driver_opt) # NOTE(geekinutah): By default drivers wait 5 seconds before reporting INITIAL_REPORTING_DELAY = 5 class API(object): _driver = None _driver_name_class_mapping = { 'db': 'nova.servicegroup.drivers.db.DbDriver', 'zk': 'nova.servicegroup.drivers.zk.ZooKeeperDriver', 'mc': 'nova.servicegroup.drivers.mc.MemcachedDriver' } def __new__(cls, *args, **kwargs): '''Create an instance of the servicegroup API. args and kwargs are passed down to the servicegroup driver when it gets created. No args currently exist, though. Valid kwargs are: db_allowed - Boolean. False if direct db access is not allowed and alternative data access (conductor) should be used instead. ''' if not cls._driver: LOG.debug(_('ServiceGroup driver defined as an instance of %s'), str(CONF.servicegroup_driver)) driver_name = CONF.servicegroup_driver try: driver_class = cls._driver_name_class_mapping[driver_name] except KeyError: raise TypeError(_("unknown ServiceGroup driver name: %s") % driver_name) cls._driver = importutils.import_object(driver_class, *args, **kwargs) utils.check_isinstance(cls._driver, ServiceGroupDriver) # we don't have to check that cls._driver is not NONE, # check_isinstance does it return super(API, cls).__new__(cls) def __init__(self, *args, **kwargs): self.basic_config_check() def basic_config_check(self): """Perform basic config check.""" # Make sure report interval is less than service down time report_interval = CONF.report_interval if CONF.service_down_time <= report_interval: new_service_down_time = int(report_interval * 2.5) LOG.warn(_("Report interval must be less than service down " "time. Current config: . Setting service_down_time to: " "%(new_service_down_time)s"), {'service_down_time': CONF.service_down_time, 'report_interval': report_interval, 'new_service_down_time': new_service_down_time}) CONF.set_override('service_down_time', new_service_down_time) def join(self, member_id, group_id, service=None): """Add a new member to the ServiceGroup @param member_id: the joined member ID @param group_id: the group name, of the joined member @param service: the parameter can be used for notifications about disconnect mode and update some internals """ msg = _('Join new ServiceGroup member %(member_id)s to the ' '%(group_id)s group, service = %(service)s') LOG.debug(msg, {'member_id': member_id, 'group_id': group_id, 'service': service}) return self._driver.join(member_id, group_id, service) def service_is_up(self, member): """Check if the given member is up.""" msg = _('Check if the given member [%s] is part of the ' 'ServiceGroup, is up') LOG.debug(msg, member) return self._driver.is_up(member) def leave(self, member_id, group_id): """Explicitly remove the given member from the ServiceGroup monitoring. 
""" msg = _('Explicitly remove the given member %(member_id)s from the' '%(group_id)s group monitoring') LOG.debug(msg, {'member_id': member_id, 'group_id': group_id}) return self._driver.leave(member_id, group_id) def get_all(self, group_id): """Returns ALL members of the given group.""" LOG.debug(_('Returns ALL members of the [%s] ' 'ServiceGroup'), group_id) return self._driver.get_all(group_id) def get_one(self, group_id): """Returns one member of the given group. The strategy to select the member is decided by the driver (e.g. random or round-robin). """ LOG.debug(_('Returns one member of the [%s] group'), group_id) return self._driver.get_one(group_id) class ServiceGroupDriver(object): """Base class for ServiceGroup drivers.""" def join(self, member_id, group_id, service=None): """Join the given service with it's group.""" raise NotImplementedError() def is_up(self, member): """Check whether the given member is up.""" raise NotImplementedError() def leave(self, member_id, group_id): """Remove the given member from the ServiceGroup monitoring.""" raise NotImplementedError() def get_all(self, group_id): """Returns ALL members of the given group.""" raise NotImplementedError() def get_one(self, group_id): """The default behavior of get_one is to randomly pick one from the result of get_all(). This is likely to be overridden in the actual driver implementation. """ members = self.get_all(group_id) if members is None: return None length = len(members) if length == 0: return None return random.choice(members) nova-2014.1.5/nova/servicegroup/drivers/0000775000567000056700000000000012540643452021212 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/servicegroup/drivers/mc.py0000664000567000056700000001014612540642543022165 0ustar jenkinsjenkins00000000000000# Service heartbeat driver using Memcached # Copyright (c) 2013 Akira Yoshiyama # # This is derived from nova/servicegroup/drivers/db.py. # Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo.config import cfg from nova import conductor from nova import context from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import memorycache from nova.openstack.common import timeutils from nova.servicegroup import api CONF = cfg.CONF CONF.import_opt('service_down_time', 'nova.service') CONF.import_opt('memcached_servers', 'nova.openstack.common.memorycache') LOG = logging.getLogger(__name__) class MemcachedDriver(api.ServiceGroupDriver): def __init__(self, *args, **kwargs): test = kwargs.get('test') if not CONF.memcached_servers and not test: raise RuntimeError(_('memcached_servers not defined')) self.mc = memorycache.get_client() self.db_allowed = kwargs.get('db_allowed', True) self.conductor_api = conductor.API(use_local=self.db_allowed) def join(self, member_id, group_id, service=None): """Join the given service with its group.""" msg = _('Memcached_Driver: join new ServiceGroup member ' '%(member_id)s to the %(group_id)s group, ' 'service = %(service)s') LOG.debug(msg, {'member_id': member_id, 'group_id': group_id, 'service': service}) if service is None: raise RuntimeError(_('service is a mandatory argument for ' 'Memcached based ServiceGroup driver')) report_interval = service.report_interval if report_interval: service.tg.add_timer(report_interval, self._report_state, api.INITIAL_REPORTING_DELAY, service) def is_up(self, service_ref): """Moved from nova.utils Check whether a service is up based on last heartbeat. """ key = "%(topic)s:%(host)s" % service_ref return self.mc.get(str(key)) is not None def get_all(self, group_id): """Returns ALL members of the given group """ LOG.debug(_('Memcached_Driver: get_all members of the %s group') % group_id) rs = [] ctxt = context.get_admin_context() services = self.conductor_api.service_get_all_by_topic(ctxt, group_id) for service in services: if self.is_up(service): rs.append(service['host']) return rs def _report_state(self, service): """Update the state of this service in the datastore.""" ctxt = context.get_admin_context() try: key = "%(topic)s:%(host)s" % service.service_ref # memcached has data expiration time capability. # set(..., time=CONF.service_down_time) uses it and # reduces key-deleting code. self.mc.set(str(key), timeutils.utcnow(), time=CONF.service_down_time) # TODO(termie): make this pattern be more elegant. if getattr(service, 'model_disconnected', False): service.model_disconnected = False LOG.error(_('Recovered model server connection!')) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable=W0702 if not getattr(service, 'model_disconnected', False): service.model_disconnected = True LOG.exception(_('model server went away')) nova-2014.1.5/nova/servicegroup/drivers/zk.py0000664000567000056700000001423512540642543022215 0ustar jenkinsjenkins00000000000000# Copyright (c) AT&T 2012-2013 Yun Mao # Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
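The memcached driver above leans entirely on key expiry for liveness: each heartbeat rewrites the service key with time=CONF.service_down_time, so any key that is still present is, by definition, a fresh heartbeat. The same pattern in isolation, using python-memcached directly; the server address and key format are examples:

import time

import memcache

mc = memcache.Client(['127.0.0.1:11211'])
SERVICE_DOWN_TIME = 60  # seconds, mirrors CONF.service_down_time

# Heartbeat: memcached expires the key if it is not refreshed in time.
mc.set('compute:host1', time.time(), time=SERVICE_DOWN_TIME)

# Liveness check: a vanished key means the heartbeats stopped.
print(mc.get('compute:host1') is not None)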
import os import eventlet from oslo.config import cfg from nova import exception from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import loopingcall from nova.servicegroup import api evzookeeper = importutils.try_import('evzookeeper') membership = importutils.try_import('evzookeeper.membership') zookeeper = importutils.try_import('zookeeper') zk_driver_opts = [ cfg.StrOpt('address', help='The ZooKeeper addresses for servicegroup service in the ' 'format of host1:port,host2:port,host3:port'), cfg.IntOpt('recv_timeout', default=4000, help='The recv_timeout parameter for the zk session'), cfg.StrOpt('sg_prefix', default="/servicegroups", help='The prefix used in ZooKeeper to store ephemeral nodes'), cfg.IntOpt('sg_retry_interval', default=5, help='Number of seconds to wait until retrying to join the ' 'session'), ] CONF = cfg.CONF CONF.register_opts(zk_driver_opts, group="zookeeper") LOG = logging.getLogger(__name__) class ZooKeeperDriver(api.ServiceGroupDriver): """ZooKeeper driver for the service group API.""" def __init__(self, *args, **kwargs): """Create the zk session object.""" if not all([evzookeeper, membership, zookeeper]): raise ImportError('zookeeper module not found') null = open(os.devnull, "w") self._session = evzookeeper.ZKSession(CONF.zookeeper.address, recv_timeout= CONF.zookeeper.recv_timeout, zklog_fd=null) self._memberships = {} self._monitors = {} # Make sure the prefix exists try: self._session.create(CONF.zookeeper.sg_prefix, "", acl=[evzookeeper.ZOO_OPEN_ACL_UNSAFE]) except zookeeper.NodeExistsException: pass super(ZooKeeperDriver, self).__init__() def join(self, member_id, group, service=None): """Join the given service with its group.""" LOG.debug(_('ZooKeeperDriver: join new member %(id)s to the ' '%(gr)s group, service=%(sr)s'), {'id': member_id, 'gr': group, 'sr': service}) member = self._memberships.get((group, member_id), None) if member is None: # the first time to join. Generate a new object path = "%s/%s" % (CONF.zookeeper.sg_prefix, group) try: member = membership.Membership(self._session, path, member_id) except RuntimeError: LOG.exception(_("Unable to join. It is possible that either " "another node exists with the same name, or " "this node just restarted. We will try " "again in a short while to make sure.")) eventlet.sleep(CONF.zookeeper.sg_retry_interval) member = membership.Membership(self._session, path, member_id) self._memberships[(group, member_id)] = member return FakeLoopingCall(self, member_id, group) def leave(self, member_id, group): """Remove the given member from the service group.""" LOG.debug(_('ZooKeeperDriver.leave: %(member)s from group %(group)s'), {'member': member_id, 'group': group}) try: key = (group, member_id) member = self._memberships[key] member.leave() del self._memberships[key] except KeyError: LOG.error(_('ZooKeeperDriver.leave: %(id)s has not joined to the ' '%(gr)s group'), {'id': member_id, 'gr': group}) def is_up(self, service_ref): group_id = service_ref['topic'] member_id = service_ref['host'] all_members = self.get_all(group_id) return member_id in all_members def get_all(self, group_id): """Return all members in a list, or a ServiceGroupUnavailable exception. 
""" monitor = self._monitors.get(group_id, None) if monitor is None: path = "%s/%s" % (CONF.zookeeper.sg_prefix, group_id) monitor = membership.MembershipMonitor(self._session, path) self._monitors[group_id] = monitor # Note(maoy): When initialized for the first time, it takes a # while to retrieve all members from zookeeper. To prevent # None to be returned, we sleep 5 sec max to wait for data to # be ready. for _retry in range(50): eventlet.sleep(0.1) all_members = monitor.get_all() if all_members is not None: return all_members all_members = monitor.get_all() if all_members is None: raise exception.ServiceGroupUnavailable(driver="ZooKeeperDriver") return all_members class FakeLoopingCall(loopingcall.LoopingCallBase): """The fake Looping Call implementation, created for backward compatibility with a membership based on DB. """ def __init__(self, driver, host, group): self._driver = driver self._group = group self._host = host def stop(self): self._driver.leave(self._host, self._group) def start(self, interval, initial_delay=None): pass def wait(self): pass nova-2014.1.5/nova/servicegroup/drivers/db.py0000664000567000056700000001046012540642543022152 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo.config import cfg import six from nova import conductor from nova import context from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import timeutils from nova.servicegroup import api CONF = cfg.CONF CONF.import_opt('service_down_time', 'nova.service') LOG = logging.getLogger(__name__) class DbDriver(api.ServiceGroupDriver): def __init__(self, *args, **kwargs): self.db_allowed = kwargs.get('db_allowed', True) self.conductor_api = conductor.API(use_local=self.db_allowed) def join(self, member_id, group_id, service=None): """Join the given service with it's group.""" msg = _('DB_Driver: join new ServiceGroup member %(member_id)s to ' 'the %(group_id)s group, service = %(service)s') LOG.debug(msg, {'member_id': member_id, 'group_id': group_id, 'service': service}) if service is None: raise RuntimeError(_('service is a mandatory argument for DB based' ' ServiceGroup driver')) report_interval = service.report_interval if report_interval: service.tg.add_timer(report_interval, self._report_state, api.INITIAL_REPORTING_DELAY, service) def is_up(self, service_ref): """Moved from nova.utils Check whether a service is up based on last heartbeat. """ last_heartbeat = service_ref['updated_at'] or service_ref['created_at'] if isinstance(last_heartbeat, six.string_types): # NOTE(russellb) If this service_ref came in over rpc via # conductor, then the timestamp will be a string and needs to be # converted back to a datetime. last_heartbeat = timeutils.parse_strtime(last_heartbeat) else: # Objects have proper UTC timezones, but the timeutils comparison # below does not (and will fail) last_heartbeat = last_heartbeat.replace(tzinfo=None) # Timestamps in DB are UTC. 
elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) LOG.debug('DB_Driver.is_up last_heartbeat = %(lhb)s elapsed = %(el)s', {'lhb': str(last_heartbeat), 'el': str(elapsed)}) return abs(elapsed) <= CONF.service_down_time def get_all(self, group_id): """Returns ALL members of the given group """ LOG.debug(_('DB_Driver: get_all members of the %s group') % group_id) rs = [] ctxt = context.get_admin_context() services = self.conductor_api.service_get_all_by_topic(ctxt, group_id) for service in services: if self.is_up(service): rs.append(service['host']) return rs def _report_state(self, service): """Update the state of this service in the datastore.""" ctxt = context.get_admin_context() state_catalog = {} try: report_count = service.service_ref['report_count'] + 1 state_catalog['report_count'] = report_count service.service_ref = self.conductor_api.service_update(ctxt, service.service_ref, state_catalog) # TODO(termie): make this pattern be more elegant. if getattr(service, 'model_disconnected', False): service.model_disconnected = False LOG.error(_('Recovered model server connection!')) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable=W0702 if not getattr(service, 'model_disconnected', False): service.model_disconnected = True LOG.exception(_('model server went away')) nova-2014.1.5/nova/servicegroup/drivers/__init__.py0000664000567000056700000000000012540642532023307 0ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/config.py0000664000567000056700000000263112540642543016640 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from nova import debugger from nova.openstack.common.db import options from nova import paths from nova import rpc from nova import version _DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('nova.sqlite') def parse_args(argv, default_config_files=None): options.set_defaults(sql_connection=_DEFAULT_SQL_CONNECTION, sqlite_db='nova.sqlite') rpc.set_defaults(control_exchange='nova') debugger.register_cli_opts() cfg.CONF(argv[1:], project='nova', version=version.version_string(), default_config_files=default_config_files) rpc.init(cfg.CONF) nova-2014.1.5/nova/consoleauth/0000775000567000056700000000000012540643452017343 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/consoleauth/manager.py0000664000567000056700000001130312540642543021325 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Auth Components for Consoles.""" import time from oslo.config import cfg from oslo import messaging from nova.cells import rpcapi as cells_rpcapi from nova.compute import rpcapi as compute_rpcapi from nova import manager from nova.objects import instance as instance_obj from nova.openstack.common.gettextutils import _ from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova.openstack.common import memorycache LOG = logging.getLogger(__name__) consoleauth_opts = [ cfg.IntOpt('console_token_ttl', default=600, help='How many seconds before deleting tokens') ] CONF = cfg.CONF CONF.register_opts(consoleauth_opts) CONF.import_opt('enable', 'nova.cells.opts', group='cells') class ConsoleAuthManager(manager.Manager): """Manages token based authentication.""" target = messaging.Target(version='2.0') def __init__(self, scheduler_driver=None, *args, **kwargs): super(ConsoleAuthManager, self).__init__(service_name='consoleauth', *args, **kwargs) self.mc = memorycache.get_client() self.compute_rpcapi = compute_rpcapi.ComputeAPI() self.cells_rpcapi = cells_rpcapi.CellsAPI() def _get_tokens_for_instance(self, instance_uuid): tokens_str = self.mc.get(instance_uuid.encode('UTF-8')) if not tokens_str: tokens = [] else: tokens = jsonutils.loads(tokens_str) return tokens def authorize_console(self, context, token, console_type, host, port, internal_access_path, instance_uuid): token_dict = {'token': token, 'instance_uuid': instance_uuid, 'console_type': console_type, 'host': host, 'port': port, 'internal_access_path': internal_access_path, 'last_activity_at': time.time()} data = jsonutils.dumps(token_dict) self.mc.set(token.encode('UTF-8'), data, CONF.console_token_ttl) tokens = self._get_tokens_for_instance(instance_uuid) # Remove the expired tokens from cache. for tok in tokens: token_str = self.mc.get(tok.encode('UTF-8')) if not token_str: tokens.remove(tok) tokens.append(token) self.mc.set(instance_uuid.encode('UTF-8'), jsonutils.dumps(tokens)) LOG.audit(_("Received Token: %(token)s, %(token_dict)s"), {'token': token, 'token_dict': token_dict}) def _validate_token(self, context, token): instance_uuid = token['instance_uuid'] if instance_uuid is None: return False # NOTE(comstud): consoleauth was meant to run in API cells. So, # if cells is enabled, we must call down to the child cell for # the instance. 
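# In either branch below, validation is a blocking RPC round trip:
# check_token() does not answer the console proxy until the compute
# service (or the child cell) re-confirms that the token's port and
# console_type still match the instance.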
if CONF.cells.enable: return self.cells_rpcapi.validate_console_port(context, instance_uuid, token['port'], token['console_type']) instance = instance_obj.Instance.get_by_uuid(context, instance_uuid) return self.compute_rpcapi.validate_console_port(context, instance, token['port'], token['console_type']) def check_token(self, context, token): token_str = self.mc.get(token.encode('UTF-8')) token_valid = (token_str is not None) LOG.audit(_("Checking Token: %(token)s, %(token_valid)s"), {'token': token, 'token_valid': token_valid}) if token_valid: token = jsonutils.loads(token_str) if self._validate_token(context, token): return token def delete_tokens_for_instance(self, context, instance_uuid): tokens = self._get_tokens_for_instance(instance_uuid) for token in tokens: self.mc.delete(token.encode('UTF-8')) self.mc.delete(instance_uuid.encode('UTF-8')) nova-2014.1.5/nova/consoleauth/__init__.py0000664000567000056700000000165112540642543021457 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module to authenticate Consoles.""" from oslo.config import cfg consoleauth_topic_opt = cfg.StrOpt('consoleauth_topic', default='consoleauth', help='The topic console auth proxy nodes listen on') CONF = cfg.CONF CONF.register_opt(consoleauth_topic_opt) nova-2014.1.5/nova/consoleauth/rpcapi.py0000664000567000056700000000647512540642543021207 0ustar jenkinsjenkins00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the consoleauth RPC API. """ from oslo.config import cfg from oslo import messaging from nova import rpc CONF = cfg.CONF rpcapi_cap_opt = cfg.StrOpt('consoleauth', help='Set a version cap for messages sent to consoleauth services') CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') class ConsoleAuthAPI(object): '''Client side of the consoleauth rpc API. API version history: 1.0 - Initial version. 1.1 - Added get_backdoor_port() 1.2 - Added instance_uuid to authorize_console, and delete_tokens_for_instance ... Grizzly and Havana support message version 1.2. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 1.2. 
2.0 - Major API rev for Icehouse ''' VERSION_ALIASES = { 'grizzly': '1.2', 'havana': '1.2', } def __init__(self): super(ConsoleAuthAPI, self).__init__() target = messaging.Target(topic=CONF.consoleauth_topic, version='2.0') version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.consoleauth, CONF.upgrade_levels.consoleauth) self.client = rpc.get_client(target, version_cap=version_cap) def authorize_console(self, ctxt, token, console_type, host, port, internal_access_path, instance_uuid): # The remote side doesn't return anything, but we want to block # until it completes.' version = '2.0' if not self.client.can_send_version('2.0'): # NOTE(russellb) Havana compat version = '1.2' cctxt = self.client.prepare(version=version) return cctxt.call(ctxt, 'authorize_console', token=token, console_type=console_type, host=host, port=port, internal_access_path=internal_access_path, instance_uuid=instance_uuid) def check_token(self, ctxt, token): version = '2.0' if not self.client.can_send_version('2.0'): # NOTE(russellb) Havana compat version = '1.0' cctxt = self.client.prepare(version=version) return cctxt.call(ctxt, 'check_token', token=token) def delete_tokens_for_instance(self, ctxt, instance_uuid): version = '2.0' if not self.client.can_send_version('2.0'): # NOTE(russellb) Havana compat version = '1.2' cctxt = self.client.prepare(version=version) return cctxt.cast(ctxt, 'delete_tokens_for_instance', instance_uuid=instance_uuid) nova-2014.1.5/nova/loadables.py0000664000567000056700000001040512540642543017317 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Generic Loadable class support. Meant to be used by such things as scheduler filters and weights where we want to load modules from certain directories and find certain types of classes within those modules. Note that this is quite different than generic plugins and the pluginmanager code that exists elsewhere. Usage: Create a directory with an __init__.py with code such as: class SomeLoadableClass(object): pass class MyLoader(nova.loadables.BaseLoader) def __init__(self): super(MyLoader, self).__init__(SomeLoadableClass) If you create modules in the same directory and subclass SomeLoadableClass within them, MyLoader().get_all_classes() will return a list of such classes. """ import inspect import os import sys from nova import exception from nova.openstack.common import importutils class BaseLoader(object): def __init__(self, loadable_cls_type): mod = sys.modules[self.__class__.__module__] self.path = os.path.abspath(mod.__path__[0]) self.package = mod.__package__ self.loadable_cls_type = loadable_cls_type def _is_correct_class(self, obj): """Return whether an object is a class of the correct type and is not prefixed with an underscore. 
""" return (inspect.isclass(obj) and (not obj.__name__.startswith('_')) and issubclass(obj, self.loadable_cls_type)) def _get_classes_from_module(self, module_name): """Get the classes from a module that match the type we want.""" classes = [] module = importutils.import_module(module_name) for obj_name in dir(module): # Skip objects that are meant to be private. if obj_name.startswith('_'): continue itm = getattr(module, obj_name) if self._is_correct_class(itm): classes.append(itm) return classes def get_all_classes(self): """Get the classes of the type we want from all modules found in the directory that defines this class. """ classes = [] for dirpath, dirnames, filenames in os.walk(self.path): relpath = os.path.relpath(dirpath, self.path) if relpath == '.': relpkg = '' else: relpkg = '.%s' % '.'.join(relpath.split(os.sep)) for fname in filenames: root, ext = os.path.splitext(fname) if ext != '.py' or root == '__init__': continue module_name = "%s%s.%s" % (self.package, relpkg, root) mod_classes = self._get_classes_from_module(module_name) classes.extend(mod_classes) return classes def get_matching_classes(self, loadable_class_names): """Get loadable classes from a list of names. Each name can be a full module path or the full path to a method that returns classes to use. The latter behavior is useful to specify a method that returns a list of classes to use in a default case. """ classes = [] for cls_name in loadable_class_names: obj = importutils.import_class(cls_name) if self._is_correct_class(obj): classes.append(obj) elif inspect.isfunction(obj): # Get list of classes from a function for cls in obj(): classes.append(cls) else: error_str = 'Not a class of the correct type' raise exception.ClassNotFound(class_name=cls_name, exception=error_str) return classes nova-2014.1.5/nova/debugger.py0000664000567000056700000000565612540642543017171 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(markmc): this is imported before monkey patching in nova.cmd # so we avoid extra imports here import sys def enabled(): return ('--remote_debug-host' in sys.argv and '--remote_debug-port' in sys.argv) def register_cli_opts(): from oslo.config import cfg cli_opts = [ cfg.StrOpt('host', help='Debug host (IP or name) to connect. Note ' 'that using the remote debug option changes how ' 'Nova uses the eventlet library to support async IO. ' 'This could result in failures that do not occur ' 'under normal operation. Use at your own risk.'), cfg.IntOpt('port', help='Debug port to connect. Note ' 'that using the remote debug option changes how ' 'Nova uses the eventlet library to support async IO. ' 'This could result in failures that do not occur ' 'under normal operation. 
Use at your own risk.') ] cfg.CONF.register_cli_opts(cli_opts, 'remote_debug') def init(): from oslo.config import cfg CONF = cfg.CONF # NOTE(markmc): gracefully handle the CLI options not being registered if 'remote_debug' not in CONF: return if not (CONF.remote_debug.host and CONF.remote_debug.port): return from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) LOG.debug(_('Listening on %(host)s:%(port)s for debug connection'), {'host': CONF.remote_debug.host, 'port': CONF.remote_debug.port}) from pydev import pydevd pydevd.settrace(host=CONF.remote_debug.host, port=CONF.remote_debug.port, stdoutToServer=False, stderrToServer=False) LOG.warn(_('WARNING: Using the remote debug option changes how ' 'Nova uses the eventlet library to support async IO. This ' 'could result in failures that do not occur under normal ' 'operation. Use at your own risk.')) nova-2014.1.5/nova/availability_zones.py0000664000567000056700000001171112540642543021262 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Availability zone helper functions.""" from oslo.config import cfg from nova import db from nova.openstack.common import memorycache # NOTE(vish): azs don't change that often, so cache them for an hour to # avoid hitting the db multiple times on every request. 
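# Cache keys take the form "azcache-<host>" (see _make_cache_key()
# below); entries are refreshed whenever set_availability_zones()
# runs and otherwise age out after AZ_CACHE_SECONDS.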
AZ_CACHE_SECONDS = 60 * 60 MC = None availability_zone_opts = [ cfg.StrOpt('internal_service_availability_zone', default='internal', help='The availability_zone to show internal services under'), cfg.StrOpt('default_availability_zone', default='nova', help='Default compute node availability_zone'), ] CONF = cfg.CONF CONF.register_opts(availability_zone_opts) def _get_cache(): global MC if MC is None: MC = memorycache.get_client() return MC def reset_cache(): """Reset the cache, mainly for testing purposes and update availability_zone for host aggregate """ global MC MC = None def _make_cache_key(host): return "azcache-%s" % host.encode('utf-8') def set_availability_zones(context, services): # Makes sure services isn't a sqlalchemy object services = [dict(service.iteritems()) for service in services] metadata = db.aggregate_host_get_by_metadata_key(context, key='availability_zone') for service in services: az = CONF.internal_service_availability_zone if service['topic'] == "compute": if metadata.get(service['host']): az = u','.join(list(metadata[service['host']])) else: az = CONF.default_availability_zone # update the cache update_host_availability_zone_cache(context, service['host'], az) service['availability_zone'] = az return services def get_host_availability_zone(context, host, conductor_api=None): if conductor_api: metadata = conductor_api.aggregate_metadata_get_by_host( context, host, key='availability_zone') else: metadata = db.aggregate_metadata_get_by_host( context, host, key='availability_zone') if 'availability_zone' in metadata: az = list(metadata['availability_zone'])[0] else: az = CONF.default_availability_zone return az def update_host_availability_zone_cache(context, host, availability_zone=None): if not availability_zone: availability_zone = get_host_availability_zone(context, host) cache = _get_cache() cache_key = _make_cache_key(host) cache.delete(cache_key) cache.set(cache_key, availability_zone, AZ_CACHE_SECONDS) def get_availability_zones(context, get_only_available=False): """Return available and unavailable zones on demand. 
:param get_only_available: flag to determine whether to return available zones only, default False indicates return both available zones and not available zones, True indicates return available zones only """ enabled_services = db.service_get_all(context, False) enabled_services = set_availability_zones(context, enabled_services) available_zones = [] for zone in [service['availability_zone'] for service in enabled_services]: if zone not in available_zones: available_zones.append(zone) if not get_only_available: disabled_services = db.service_get_all(context, True) disabled_services = set_availability_zones(context, disabled_services) not_available_zones = [] zones = [service['availability_zone'] for service in disabled_services if service['availability_zone'] not in available_zones] for zone in zones: if zone not in not_available_zones: not_available_zones.append(zone) return (available_zones, not_available_zones) else: return available_zones def get_instance_availability_zone(context, instance): """Return availability zone of specified instance.""" host = str(instance.get('host')) if not host: return None cache_key = _make_cache_key(host) cache = _get_cache() az = cache.get(cache_key) if not az: elevated = context.elevated() az = get_host_availability_zone(elevated, host) cache.set(cache_key, az, AZ_CACHE_SECONDS) return az nova-2014.1.5/nova/cmd/0000775000567000056700000000000012540643452015562 5ustar jenkinsjenkins00000000000000nova-2014.1.5/nova/cmd/console.py0000664000567000056700000000240412540642543017576 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Nova Console Proxy.""" import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import version CONF = cfg.CONF CONF.import_opt('console_topic', 'nova.console.rpcapi') def main(): config.parse_args(sys.argv) logging.setup("nova") gmr.TextGuruMeditation.setup_autorun(version) server = service.Service.create(binary='nova-console', topic=CONF.console_topic) service.serve(server) service.wait() nova-2014.1.5/nova/cmd/dhcpbridge.py0000664000567000056700000000763512540642543020242 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """ Handle lease database updates from DHCP servers. """ from __future__ import print_function import os import sys from oslo.config import cfg from nova import config from nova import context from nova import db from nova.network import rpcapi as network_rpcapi from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import rpc CONF = cfg.CONF CONF.import_opt('host', 'nova.netconf') CONF.import_opt('network_manager', 'nova.service') LOG = logging.getLogger(__name__) def add_lease(mac, ip_address): """Set the IP that was assigned by the DHCP server.""" api = network_rpcapi.NetworkAPI() api.lease_fixed_ip(context.get_admin_context(), ip_address, CONF.host) def old_lease(mac, ip_address): """Called when an old lease is recognized.""" # NOTE(vish): We assume we heard about this lease the first time. # If not, we will get it the next time the lease is # renewed. pass def del_lease(mac, ip_address): """Called when a lease expires.""" api = network_rpcapi.NetworkAPI() api.release_fixed_ip(context.get_admin_context(), ip_address, CONF.host) def init_leases(network_id): """Get the list of hosts for a network.""" ctxt = context.get_admin_context() network_ref = db.network_get(ctxt, network_id) network_manager = importutils.import_object(CONF.network_manager) return network_manager.get_dhcp_leases(ctxt, network_ref) def add_action_parsers(subparsers): parser = subparsers.add_parser('init') # NOTE(cfb): dnsmasq always passes mac, and ip. hostname # is passed if known. We don't care about # hostname, but argparse will complain if we # do not accept it. for action in ['add', 'del', 'old']: parser = subparsers.add_parser(action) parser.add_argument('mac') parser.add_argument('ip') parser.add_argument('hostname', nargs='?', default='') parser.set_defaults(func=globals()[action + '_lease']) CONF.register_cli_opt( cfg.SubCommandOpt('action', title='Action options', help='Available dhcpbridge options', handler=add_action_parsers)) def main(): """Parse environment and arguments and call the appropriate action.""" config.parse_args(sys.argv, default_config_files=jsonutils.loads(os.environ['CONFIG_FILE'])) logging.setup("nova") global LOG LOG = logging.getLogger('nova.dhcpbridge') if CONF.action.name in ['add', 'del', 'old']: msg = (_("Called '%(action)s' for mac '%(mac)s' with ip '%(ip)s'") % {"action": CONF.action.name, "mac": CONF.action.mac, "ip": CONF.action.ip}) LOG.debug(msg) CONF.action.func(CONF.action.mac, CONF.action.ip) else: try: network_id = int(os.environ.get('NETWORK_ID')) except TypeError: LOG.error(_("Environment variable 'NETWORK_ID' must be set.")) return(1) print(init_leases(network_id)) rpc.cleanup() nova-2014.1.5/nova/cmd/api_os_compute.py0000664000567000056700000000265412540642543021151 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for Nova OS API.""" import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('enabled_ssl_apis', 'nova.service') def main(): config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) should_use_ssl = 'osapi_compute' in CONF.enabled_ssl_apis server = service.WSGIService('osapi_compute', use_ssl=should_use_ssl) service.serve(server, workers=server.workers) service.wait() nova-2014.1.5/nova/cmd/baremetal_manage.py0000664000567000056700000001602112540642543021400 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Interactive shell based on Django: # # Copyright (c) 2005, the Lawrence Journal-World # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
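In the OS API starter above, SSL is opt-in per endpoint: main() passes use_ssl=True only when 'osapi_compute' appears in the enabled_ssl_apis list option. A self-contained sketch of that check; the option is re-registered here only so the snippet runs outside Nova, and the override value is an example:

from oslo.config import cfg

CONF = cfg.CONF
CONF.register_opt(cfg.ListOpt('enabled_ssl_apis', default=[]))
CONF.set_override('enabled_ssl_apis', ['osapi_compute'])

should_use_ssl = 'osapi_compute' in CONF.enabled_ssl_apis
print(should_use_ssl)  # True -> WSGIService('osapi_compute', use_ssl=True)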
""" CLI interface for nova bare-metal management. """ import os import sys from oslo.config import cfg import six from nova import config from nova.openstack.common import cliutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import version from nova.virt.baremetal.db import migration as bmdb_migration CONF = cfg.CONF # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator class BareMetalDbCommands(object): """Class for managing the bare-metal database.""" def __init__(self): pass @args('--version', dest='version', metavar='', help='Bare-metal Database version') def sync(self, version=None): """Sync the database up to the most recent version.""" bmdb_migration.db_sync(version) def version(self): """Print the current database version.""" v = bmdb_migration.db_version() print(v) # return for unittest return v CATEGORIES = { 'db': BareMetalDbCommands, } def methods_of(obj): """Get all callable methods of an object that don't start with underscore. returns a list of tuples of the form (method_name, method) """ result = [] for i in dir(obj): if callable(getattr(obj, i)) and not i.startswith('_'): result.append((i, getattr(obj, i))) return result def add_command_parsers(subparsers): parser = subparsers.add_parser('bash-completion') parser.add_argument('query_category', nargs='?') for category in CATEGORIES: command_object = CATEGORIES[category]() parser = subparsers.add_parser(category) parser.set_defaults(command_object=command_object) category_subparsers = parser.add_subparsers(dest='action') for (action, action_fn) in methods_of(command_object): parser = category_subparsers.add_parser(action) action_kwargs = [] for args, kwargs in getattr(action_fn, 'args', []): action_kwargs.append(kwargs['dest']) kwargs['dest'] = 'action_kwarg_' + kwargs['dest'] parser.add_argument(*args, **kwargs) parser.set_defaults(action_fn=action_fn) parser.set_defaults(action_kwargs=action_kwargs) parser.add_argument('action_args', nargs='*') category_opt = cfg.SubCommandOpt('category', title='Command categories', help='Available categories', handler=add_command_parsers) def main(): """Parse options and call the appropriate class/method.""" CONF.register_cli_opt(category_opt) try: config.parse_args(sys.argv) logging.setup("nova") except cfg.ConfigFilesNotFoundError: cfgfile = CONF.config_file[-1] if CONF.config_file else None if cfgfile and not os.access(cfgfile, os.R_OK): st = os.stat(cfgfile) print(_("Could not read %s. 
Re-running with sudo") % cfgfile) try: os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv) except Exception: print(_('sudo failed, continuing as if nothing happened')) print(_('Please re-run nova-manage as root.')) return(2) if CONF.category.name == "version": print(version.version_string_with_package()) return(0) if CONF.category.name == "bash-completion": if not CONF.category.query_category: print(" ".join(CATEGORIES.keys())) elif CONF.category.query_category in CATEGORIES: fn = CATEGORIES[CONF.category.query_category] command_object = fn() actions = methods_of(command_object) print(" ".join([k for (k, v) in actions])) return(0) fn = CONF.category.action_fn fn_args = [arg.decode('utf-8') for arg in CONF.category.action_args] fn_kwargs = {} for k in CONF.category.action_kwargs: v = getattr(CONF.category, 'action_kwarg_' + k) if v is None: continue if isinstance(v, six.string_types): v = v.decode('utf-8') fn_kwargs[k] = v # call the action with the remaining arguments # check arguments try: cliutils.validate_args(fn, *fn_args, **fn_kwargs) except cliutils.MissingArgs as e: print(fn.__doc__) print(e) return(1) try: fn(*fn_args, **fn_kwargs) return(0) except Exception: print(_("Command failed, please check log for more info")) raise nova-2014.1.5/nova/cmd/novncproxy.py0000664000567000056700000000613012540642543020361 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Websocket proxy that is compatible with OpenStack Nova noVNC consoles. Leverages websockify.py by Joel Martin """ from __future__ import print_function import os import sys from oslo.config import cfg from nova import config from nova.console import websocketproxy from nova.openstack.common.report import guru_meditation_report as gmr from nova import version opts = [ cfg.StrOpt('novncproxy_host', default='0.0.0.0', help='Host on which to listen for incoming requests'), cfg.IntOpt('novncproxy_port', default=6080, help='Port on which to listen for incoming requests'), ] CONF = cfg.CONF CONF.register_cli_opts(opts) CONF.import_opt('record', 'nova.cmd.novnc') CONF.import_opt('daemon', 'nova.cmd.novnc') CONF.import_opt('ssl_only', 'nova.cmd.novnc') CONF.import_opt('source_is_ipv6', 'nova.cmd.novnc') CONF.import_opt('cert', 'nova.cmd.novnc') CONF.import_opt('key', 'nova.cmd.novnc') CONF.import_opt('web', 'nova.cmd.novnc') def main(): # Setup flags CONF.set_default('web', '/usr/share/novnc') config.parse_args(sys.argv) if CONF.ssl_only and not os.path.exists(CONF.cert): print("SSL only and %s not found" % CONF.cert) return(-1) # Check to see if novnc html/js/css files are present if not os.path.exists(CONF.web): print("Can not find novnc html/js/css files at %s." 
% CONF.web) return(-1) gmr.TextGuruMeditation.setup_autorun(version) # Create and start the NovaWebSockets proxy server = websocketproxy.NovaWebSocketProxy( listen_host=CONF.novncproxy_host, listen_port=CONF.novncproxy_port, source_is_ipv6=CONF.source_is_ipv6, verbose=CONF.verbose, cert=CONF.cert, key=CONF.key, ssl_only=CONF.ssl_only, daemon=CONF.daemon, record=CONF.record, web=CONF.web, file_only=True, no_parent=True, target_host='ignore', target_port='ignore', wrap_mode='exit', wrap_cmd=None) server.start_server() nova-2014.1.5/nova/cmd/compute.py0000664000567000056700000000461312540642543017614 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Nova Compute.""" import sys import traceback from oslo.config import cfg from nova.conductor import rpcapi as conductor_rpcapi from nova import config import nova.db.api from nova import exception from nova import objects from nova.objects import base as objects_base from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('compute_topic', 'nova.compute.rpcapi') CONF.import_opt('use_local', 'nova.conductor.api', group='conductor') def block_db_access(): class NoDB(object): def __getattr__(self, attr): return self def __call__(self, *args, **kwargs): stacktrace = "".join(traceback.format_stack()) LOG = logging.getLogger('nova.compute') LOG.error(_('No db access allowed in nova-compute: %s'), stacktrace) raise exception.DBNotAllowed('nova-compute') nova.db.api.IMPL = NoDB() def main(): objects.register_all() config.parse_args(sys.argv) logging.setup('nova') utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) if not CONF.conductor.use_local: block_db_access() objects_base.NovaObject.indirection_api = \ conductor_rpcapi.ConductorAPI() server = service.Service.create(binary='nova-compute', topic=CONF.compute_topic, db_allowed=CONF.conductor.use_local) service.serve(server) service.wait() nova-2014.1.5/nova/cmd/objectstore.py0000664000567000056700000000240712540642543020462 0ustar jenkinsjenkins00000000000000 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
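# NOTE(editor): sketch for nova/cmd/compute.py above. Once block_db_access()
# installs the NoDB stand-in, any attribute chain on nova.db.api.IMPL keeps
# returning the same object; only *calling* it raises. Self-contained
# illustration (RuntimeError stands in for exception.DBNotAllowed):
class NoDB(object):
    def __getattr__(self, attr):
        return self

    def __call__(self, *args, **kwargs):
        raise RuntimeError('No db access allowed in nova-compute')


IMPL = NoDB()
try:
    IMPL.instance_get_all('ctxt')   # hypothetical DB API call
except RuntimeError as e:
    print(e)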
See the # License for the specific language governing permissions and limitations # under the License. """Daemon for nova objectstore. Supports S3 API.""" import sys from nova import config from nova.objectstore import s3server from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version def main(): config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) server = s3server.get_wsgi_server() service.serve(server) service.wait() nova-2014.1.5/nova/cmd/cells.py0000664000567000056700000000265512540642543017246 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Nova Cells Service.""" import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('topic', 'nova.cells.opts', group='cells') CONF.import_opt('manager', 'nova.cells.opts', group='cells') def main(): config.parse_args(sys.argv) logging.setup('nova') utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) server = service.Service.create(binary='nova-cells', topic=CONF.cells.topic, manager=CONF.cells.manager) service.serve(server) service.wait() nova-2014.1.5/nova/cmd/consoleauth.py0000664000567000056700000000230612540642543020461 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """VNC Console Proxy Server.""" import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import version CONF = cfg.CONF def main(): config.parse_args(sys.argv) logging.setup("nova") gmr.TextGuruMeditation.setup_autorun(version) server = service.Service.create(binary='nova-consoleauth', topic=CONF.consoleauth_topic) service.serve(server) service.wait() nova-2014.1.5/nova/cmd/api_ec2.py0000664000567000056700000000271512540642543017443 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. 
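# NOTE(editor): sketch for nova/cmd/cells.py above. CONF.import_opt() pulls an
# option definition in from whichever module registers it, so the starter can
# read CONF.cells.topic without redeclaring the option. Roughly:
from oslo.config import cfg

CONF = cfg.CONF
CONF.import_opt('topic', 'nova.cells.opts', group='cells')
CONF.import_opt('manager', 'nova.cells.opts', group='cells')


def build_cells_service(service_module):
    # service_module stands in for nova.service, as in main() above
    return service_module.Service.create(binary='nova-cells',
                                         topic=CONF.cells.topic,
                                         manager=CONF.cells.manager)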
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for Nova EC2 API.""" import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('enabled_ssl_apis', 'nova.service') def main(): config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) should_use_ssl = 'ec2' in CONF.enabled_ssl_apis server = service.WSGIService('ec2', use_ssl=should_use_ssl, max_url_len=16384) service.serve(server, workers=server.workers) service.wait() nova-2014.1.5/nova/cmd/all.py0000664000567000056700000000647512540642543016720 0ustar jenkinsjenkins00000000000000# Copyright 2011 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for all nova services. This script attempts to start all the nova services in one process. Each service is started in its own greenthread. Please note that exceptions and sys.exit() on the starting of a service are logged and the script will continue attempting to launch the rest of the services. 
""" import sys from oslo.config import cfg from nova import config from nova.objectstore import s3server from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova import service from nova import utils from nova.vnc import xvp_proxy CONF = cfg.CONF CONF.import_opt('manager', 'nova.conductor.api', group='conductor') CONF.import_opt('topic', 'nova.conductor.api', group='conductor') CONF.import_opt('enabled_apis', 'nova.service') CONF.import_opt('enabled_ssl_apis', 'nova.service') def main(): config.parse_args(sys.argv) logging.setup("nova") LOG = logging.getLogger('nova.all') utils.monkey_patch() launcher = service.process_launcher() # nova-api for api in CONF.enabled_apis: try: should_use_ssl = api in CONF.enabled_ssl_apis server = service.WSGIService(api, use_ssl=should_use_ssl) launcher.launch_service(server, workers=server.workers or 1) except (Exception, SystemExit): LOG.exception(_('Failed to load %s') % '%s-api' % api) for mod in [s3server, xvp_proxy]: try: launcher.launch_service(mod.get_wsgi_server()) except (Exception, SystemExit): LOG.exception(_('Failed to load %s') % mod.__name__) for binary in ['nova-compute', 'nova-network', 'nova-scheduler', 'nova-cert', 'nova-conductor']: # FIXME(sirp): Most service configs are defined in nova/service.py, but # conductor has set a new precedent of storing these configs # nova//api.py. # # We should update the existing services to use this new approach so we # don't have to treat conductor differently here. if binary == 'nova-conductor': topic = CONF.conductor.topic manager = CONF.conductor.manager else: topic = None manager = None try: launcher.launch_service(service.Service.create(binary=binary, topic=topic, manager=manager)) except (Exception, SystemExit): LOG.exception(_('Failed to load %s'), binary) launcher.wait() nova-2014.1.5/nova/cmd/novnc.py0000664000567000056700000000275312540642543017266 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg opts = [ cfg.BoolOpt('record', default=False, help='Record sessions to FILE.[session_number]'), cfg.BoolOpt('daemon', default=False, help='Become a daemon (background process)'), cfg.BoolOpt('ssl_only', default=False, help='Disallow non-encrypted connections'), cfg.BoolOpt('source_is_ipv6', default=False, help='Source is ipv6'), cfg.StrOpt('cert', default='self.pem', help='SSL certificate file'), cfg.StrOpt('key', help='SSL key file (if separate from cert)'), cfg.StrOpt('web', default='/usr/share/spice-html5', help='Run webserver on same port. Serve files from DIR.'), ] cfg.CONF.register_cli_opts(opts) nova-2014.1.5/nova/cmd/__init__.py0000664000567000056700000000303112540642543017670 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR import os import sys # NOTE(mikal): All of this is because if dnspython is present in your # environment then eventlet monkeypatches socket.getaddrinfo() with an # implementation which doesn't work for IPv6. What we're checking here is # that the magic environment variable was set when the import happened. if ('eventlet' in sys.modules and os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): raise ImportError('eventlet imported before nova/cmd/__init__ ' '(env var set to %s)' % os.environ.get('EVENTLET_NO_GREENDNS')) os.environ['EVENTLET_NO_GREENDNS'] = 'yes' import eventlet from nova import debugger if debugger.enabled(): # turn off thread patching to enable the remote debugger eventlet.monkey_patch(os=False, thread=False) else: eventlet.monkey_patch(os=False) nova-2014.1.5/nova/cmd/api.py0000664000567000056700000000343112540642543016706 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for Nova API. Starts both the EC2 and OpenStack APIs in separate greenthreads. """ import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('enabled_apis', 'nova.service') CONF.import_opt('enabled_ssl_apis', 'nova.service') def main(): config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) launcher = service.process_launcher() for api in CONF.enabled_apis: should_use_ssl = api in CONF.enabled_ssl_apis if api == 'ec2': server = service.WSGIService(api, use_ssl=should_use_ssl, max_url_len=16384) else: server = service.WSGIService(api, use_ssl=should_use_ssl) launcher.launch_service(server, workers=server.workers or 1) launcher.wait() nova-2014.1.5/nova/cmd/baremetal_deploy_helper.py0000664000567000056700000003055412540642543023012 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
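# NOTE(editor): sketch for nova/cmd/__init__.py above. The ordering matters
# because eventlet reads EVENTLET_NO_GREENDNS only once, at import time -- so
# the variable has to be set before the first eventlet import:
import os

os.environ['EVENTLET_NO_GREENDNS'] = 'yes'   # must precede the import below
import eventlet                              # noqa

eventlet.monkey_patch(os=False)              # thread patching left enabled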
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Bare-Metal Deployment Service.""" import os import sys import threading import time import cgi import Queue import re import socket import stat from wsgiref import simple_server from nova import config from nova import context as nova_context from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common import processutils from nova.openstack.common import units from nova import utils from nova.virt.baremetal import baremetal_states from nova.virt.baremetal import db from nova.virt.disk import api as disk QUEUE = Queue.Queue() LOG = logging.getLogger(__name__) class BareMetalDeployException(Exception): pass # All functions are called from deploy() directly or indirectly. # They are split for stub-out. def discovery(portal_address, portal_port): """Do iSCSI discovery on portal.""" utils.execute('iscsiadm', '-m', 'discovery', '-t', 'st', '-p', '%s:%s' % (portal_address, portal_port), run_as_root=True, check_exit_code=[0]) def login_iscsi(portal_address, portal_port, target_iqn): """Login to an iSCSI target.""" utils.execute('iscsiadm', '-m', 'node', '-p', '%s:%s' % (portal_address, portal_port), '-T', target_iqn, '--login', run_as_root=True, check_exit_code=[0]) # Ensure the login complete time.sleep(3) def logout_iscsi(portal_address, portal_port, target_iqn): """Logout from an iSCSI target.""" utils.execute('iscsiadm', '-m', 'node', '-p', '%s:%s' % (portal_address, portal_port), '-T', target_iqn, '--logout', run_as_root=True, check_exit_code=[0]) def make_partitions(dev, root_mb, swap_mb, ephemeral_mb): """Create partitions for root, ephemeral and swap on a disk device.""" # Lead in with 1MB to allow room for the partition table itself, otherwise # the way sfdisk adjusts doesn't shift the partition up to compensate, and # we lose the space. 
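# NOTE(editor): illustrative expansion of the stdin_command built just below --
# for root_mb=10240, swap_mb=1024, ephemeral_mb=2048 sfdisk receives:
#
#   1,2048,83;    (ephemeral, starting at 1MB, type 83 = Linux)
#   ,1024,82;     (swap, type 82)
#   ,10240,83;    (root)
#   0,0;
#
# so partition 1 is ephemeral, 2 is swap and 3 is root, matching the
# "%s-part1/2/3" naming later used in work_on_disk().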
# http://bazaar.launchpad.net/~ubuntu-branches/ubuntu/raring/util-linux/ # raring/view/head:/fdisk/sfdisk.c#L1940 if ephemeral_mb: stdin_command = ('1,%d,83;\n,%d,82;\n,%d,83;\n0,0;\n' % (ephemeral_mb, swap_mb, root_mb)) else: stdin_command = ('1,%d,83;\n,%d,82;\n0,0;\n0,0;\n' % (root_mb, swap_mb)) utils.execute('sfdisk', '-uM', dev, process_input=stdin_command, run_as_root=True, attempts=3, check_exit_code=[0]) # avoid "device is busy" time.sleep(3) def is_block_device(dev): """Check whether a device is block or not.""" s = os.stat(dev) return stat.S_ISBLK(s.st_mode) def dd(src, dst): """Execute dd from src to dst.""" utils.execute('dd', 'if=%s' % src, 'of=%s' % dst, 'bs=1M', 'oflag=direct', run_as_root=True, check_exit_code=[0]) def mkswap(dev, label='swap1'): """Execute mkswap on a device.""" utils.execute('mkswap', '-L', label, dev, run_as_root=True, check_exit_code=[0]) def mkfs_ephemeral(dev, label="ephemeral0"): #TODO(jogo) support non-default mkfs options as well disk.mkfs("default", label, dev) def block_uuid(dev): """Get UUID of a block device.""" out, _ = utils.execute('blkid', '-s', 'UUID', '-o', 'value', dev, run_as_root=True, check_exit_code=[0]) return out.strip() def switch_pxe_config(path, root_uuid): """Switch a pxe config from deployment mode to service mode.""" with open(path) as f: lines = f.readlines() root = 'UUID=%s' % root_uuid rre = re.compile(r'\$\{ROOT\}') dre = re.compile('^default .*$') with open(path, 'w') as f: for line in lines: line = rre.sub(root, line) line = dre.sub('default boot', line) f.write(line) def notify(address, port): """Notify a node that it becomes ready to reboot.""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.connect((address, port)) s.send('done') finally: s.close() def get_dev(address, port, iqn, lun): """Returns a device path for given parameters.""" dev = "/dev/disk/by-path/ip-%s:%s-iscsi-%s-lun-%s" \ % (address, port, iqn, lun) return dev def get_image_mb(image_path): """Get size of an image in Megabyte.""" mb = units.Mi image_byte = os.path.getsize(image_path) # round up size to MB image_mb = int((image_byte + mb - 1) / mb) return image_mb def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, image_path, preserve_ephemeral): """Creates partitions and write an image to the root partition. :param preserve_ephemeral: If True, no filesystem is written to the ephemeral block device, preserving whatever content it had (if the partition table has not changed). 
""" def raise_exception(msg): LOG.error(msg) raise BareMetalDeployException(msg) if ephemeral_mb: ephemeral_part = "%s-part1" % dev swap_part = "%s-part2" % dev root_part = "%s-part3" % dev else: root_part = "%s-part1" % dev swap_part = "%s-part2" % dev if not is_block_device(dev): raise_exception(_("parent device '%s' not found") % dev) make_partitions(dev, root_mb, swap_mb, ephemeral_mb) if not is_block_device(root_part): raise_exception(_("root device '%s' not found") % root_part) if not is_block_device(swap_part): raise_exception(_("swap device '%s' not found") % swap_part) if ephemeral_mb and not is_block_device(ephemeral_part): raise_exception(_("ephemeral device '%s' not found") % ephemeral_part) dd(image_path, root_part) mkswap(swap_part) if ephemeral_mb and not preserve_ephemeral: mkfs_ephemeral(ephemeral_part) try: root_uuid = block_uuid(root_part) except processutils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(): LOG.error(_("Failed to detect root device UUID.")) return root_uuid def deploy(address, port, iqn, lun, image_path, pxe_config_path, root_mb, swap_mb, ephemeral_mb, preserve_ephemeral=False): """All-in-one function to deploy a node. :param preserve_ephemeral: If True, no filesystem is written to the ephemeral block device, preserving whatever content it had (if the partition table has not changed). """ dev = get_dev(address, port, iqn, lun) image_mb = get_image_mb(image_path) if image_mb > root_mb: root_mb = image_mb discovery(address, port) login_iscsi(address, port, iqn) try: root_uuid = work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, image_path, preserve_ephemeral) except processutils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(): # Log output if there was a error LOG.error(_("Cmd : %s"), err.cmd) LOG.error(_("StdOut : %r"), err.stdout) LOG.error(_("StdErr : %r"), err.stderr) finally: logout_iscsi(address, port, iqn) switch_pxe_config(pxe_config_path, root_uuid) # Ensure the node started netcat on the port after POST the request. 
time.sleep(3) notify(address, 10000) class Worker(threading.Thread): """Thread that handles requests in queue.""" def __init__(self): super(Worker, self).__init__() self.setDaemon(True) self.stop = False self.queue_timeout = 1 def run(self): while not self.stop: try: # Set timeout to check self.stop periodically (node_id, params) = QUEUE.get(block=True, timeout=self.queue_timeout) except Queue.Empty: pass else: # Requests comes here from BareMetalDeploy.post() LOG.info(_('start deployment for node %(node_id)s, ' 'params %(params)s'), {'node_id': node_id, 'params': params}) context = nova_context.get_admin_context() try: db.bm_node_update(context, node_id, {'task_state': baremetal_states.DEPLOYING}) deploy(**params) except Exception: LOG.exception(_('deployment to node %s failed'), node_id) db.bm_node_update(context, node_id, {'task_state': baremetal_states.DEPLOYFAIL}) else: LOG.info(_('deployment to node %s done'), node_id) db.bm_node_update(context, node_id, {'task_state': baremetal_states.DEPLOYDONE}) class BareMetalDeploy(object): """WSGI server for bare-metal deployment.""" def __init__(self): self.worker = Worker() self.worker.start() def __call__(self, environ, start_response): method = environ['REQUEST_METHOD'] if method == 'POST': return self.post(environ, start_response) else: start_response('501 Not Implemented', [('Content-type', 'text/plain')]) return 'Not Implemented' def post(self, environ, start_response): LOG.info(_("post: environ=%s"), environ) inpt = environ['wsgi.input'] length = int(environ.get('CONTENT_LENGTH', 0)) x = inpt.read(length) q = dict(cgi.parse_qsl(x)) try: node_id = q['i'] deploy_key = q['k'] address = q['a'] port = q.get('p', '3260') iqn = q['n'] lun = q.get('l', '1') err_msg = q.get('e') except KeyError as e: start_response('400 Bad Request', [('Content-type', 'text/plain')]) return "parameter '%s' is not defined" % e if err_msg: LOG.error(_('Deploy agent error message: %s'), err_msg) context = nova_context.get_admin_context() d = db.bm_node_get(context, node_id) if d['deploy_key'] != deploy_key: start_response('400 Bad Request', [('Content-type', 'text/plain')]) return 'key is not match' params = {'address': address, 'port': port, 'iqn': iqn, 'lun': lun, 'image_path': d['image_path'], 'pxe_config_path': d['pxe_config_path'], 'root_mb': int(d['root_mb']), 'swap_mb': int(d['swap_mb']), 'ephemeral_mb': int(d['ephemeral_mb']), 'preserve_ephemeral': d['preserve_ephemeral'], } # Restart worker, if needed if not self.worker.isAlive(): self.worker = Worker() self.worker.start() LOG.info(_("request is queued: node %(node_id)s, params %(params)s"), {'node_id': node_id, 'params': params}) QUEUE.put((node_id, params)) # Requests go to Worker.run() start_response('200 OK', [('Content-type', 'text/plain')]) return '' def main(): config.parse_args(sys.argv) logging.setup("nova") global LOG LOG = logging.getLogger('nova.virt.baremetal.deploy_helper') app = BareMetalDeploy() srv = simple_server.make_server('', 10000, app) srv.serve_forever() nova-2014.1.5/nova/cmd/cert.py0000664000567000056700000000234012540642543017070 0ustar jenkinsjenkins00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
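# NOTE(editor): sketch for nova/cmd/baremetal_deploy_helper.py above -- the
# deploy ramdisk is expected to POST a urlencoded form to port 10000. Field
# names mirror what BareMetalDeploy.post() parses; every value here is
# illustrative:
import urllib
import urllib2

fields = {'i': '42',                                  # bm node id
          'k': 'SECRET-DEPLOY-KEY',                   # node's deploy_key
          'a': '192.0.2.10',                          # iSCSI portal address
          'p': '3260',                                # portal port (default)
          'n': 'iqn.2010-10.org.openstack:node-42',   # target IQN
          'l': '1'}                                   # LUN (default)
urllib2.urlopen('http://192.0.2.1:10000/',
                urllib.urlencode(fields))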
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Nova Cert.""" import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('cert_topic', 'nova.cert.rpcapi') def main(): config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) server = service.Service.create(binary='nova-cert', topic=CONF.cert_topic) service.serve(server) service.wait() nova-2014.1.5/nova/cmd/network.py0000664000567000056700000000452712540642543017635 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Nova Network.""" import sys import traceback from oslo.config import cfg from nova.conductor import rpcapi as conductor_rpcapi from nova import config import nova.db.api from nova import exception from nova.objects import base as objects_base from nova.openstack.common.gettextutils import _ from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('network_topic', 'nova.network.rpcapi') CONF.import_opt('use_local', 'nova.conductor.api', group='conductor') def block_db_access(): class NoDB(object): def __getattr__(self, attr): return self def __call__(self, *args, **kwargs): stacktrace = "".join(traceback.format_stack()) LOG = logging.getLogger('nova.network') LOG.error(_('No db access allowed in nova-network: %s'), stacktrace) raise exception.DBNotAllowed('nova-network') nova.db.api.IMPL = NoDB() def main(): config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) if not CONF.conductor.use_local: block_db_access() objects_base.NovaObject.indirection_api = \ conductor_rpcapi.ConductorAPI() server = service.Service.create(binary='nova-network', topic=CONF.network_topic, db_allowed=CONF.conductor.use_local) service.serve(server) service.wait() nova-2014.1.5/nova/cmd/spicehtml5proxy.py0000664000567000056700000000573412540642543021324 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # All Rights Reserved. 
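# NOTE(editor): note for nova/cmd/network.py (and compute.py) above -- direct
# database access from these services is governed by the conductor group; an
# assumed nova.conf snippet:
#
#   [conductor]
#   use_local = False    # route DB access through nova-conductor
#
# With use_local=False the NoDB guard is installed and NovaObject calls are
# indirected via conductor_rpcapi.ConductorAPI(); use_local=True restores
# local DB access (db_allowed=True when the service is created).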
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Websocket proxy that is compatible with OpenStack Nova SPICE HTML5 consoles. Leverages websockify.py by Joel Martin """ from __future__ import print_function import os import sys from oslo.config import cfg from nova import config from nova.console import websocketproxy from nova.openstack.common.report import guru_meditation_report as gmr from nova import version opts = [ cfg.StrOpt('spicehtml5proxy_host', default='0.0.0.0', help='Host on which to listen for incoming requests'), cfg.IntOpt('spicehtml5proxy_port', default=6082, help='Port on which to listen for incoming requests'), ] CONF = cfg.CONF CONF.register_cli_opts(opts) CONF.import_opt('record', 'nova.cmd.novnc') CONF.import_opt('daemon', 'nova.cmd.novnc') CONF.import_opt('ssl_only', 'nova.cmd.novnc') CONF.import_opt('source_is_ipv6', 'nova.cmd.novnc') CONF.import_opt('cert', 'nova.cmd.novnc') CONF.import_opt('key', 'nova.cmd.novnc') CONF.import_opt('web', 'nova.cmd.novnc') def main(): # Setup flags config.parse_args(sys.argv) if CONF.ssl_only and not os.path.exists(CONF.cert): print("SSL only and %s not found." % CONF.cert) return(-1) # Check to see if spice html/js/css files are present if not os.path.exists(CONF.web): print("Can not find spice html/js/css files at %s." % CONF.web) return(-1) gmr.TextGuruMeditation.setup_autorun(version) # Create and start the NovaWebSockets proxy server = websocketproxy.NovaWebSocketProxy( listen_host=CONF.spicehtml5proxy_host, listen_port=CONF.spicehtml5proxy_port, source_is_ipv6=CONF.source_is_ipv6, verbose=CONF.verbose, cert=CONF.cert, key=CONF.key, ssl_only=CONF.ssl_only, daemon=CONF.daemon, record=CONF.record, web=CONF.web, target_host='ignore', target_port='ignore', wrap_mode='exit', wrap_cmd=None) server.start_server() nova-2014.1.5/nova/cmd/conductor.py0000664000567000056700000000273612540642543020144 0ustar jenkinsjenkins00000000000000# Copyright 2012 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
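# NOTE(editor): illustrative invocation of nova/cmd/spicehtml5proxy.py above,
# reusing the CLI options registered in nova/cmd/novnc.py:
#
#   nova-spicehtml5proxy --spicehtml5proxy_port 6082 \
#       --web /usr/share/spice-html5 --cert /etc/nova/ssl.pem --ssl_only
#
# Unlike novncproxy's main(), no CONF.set_default('web', ...) call is needed
# here: the shared 'web' option already defaults to /usr/share/spice-html5
# (see nova/cmd/novnc.py above).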
"""Starter script for Nova Conductor.""" import sys from oslo.config import cfg from nova import config from nova import objects from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('topic', 'nova.conductor.api', group='conductor') def main(): objects.register_all() config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) server = service.Service.create(binary='nova-conductor', topic=CONF.conductor.topic, manager=CONF.conductor.manager) workers = CONF.conductor.workers or utils.cpu_count() service.serve(server, workers=workers) service.wait() nova-2014.1.5/nova/cmd/scheduler.py0000664000567000056700000000262412540642543020116 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Starter script for Nova Scheduler.""" import sys from oslo.config import cfg from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('scheduler_topic', 'nova.scheduler.rpcapi') def main(): config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) server = service.Service.create(binary='nova-scheduler', topic=CONF.scheduler_topic) service.serve(server) service.wait() nova-2014.1.5/nova/cmd/xvpvncproxy.py0000664000567000056700000000215512540642543020565 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""XVP VNC Console Proxy Server.""" import sys from nova import config from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import version from nova.vnc import xvp_proxy def main(): config.parse_args(sys.argv) logging.setup("nova") gmr.TextGuruMeditation.setup_autorun(version) wsgi_server = xvp_proxy.get_wsgi_server() service.serve(wsgi_server) service.wait() nova-2014.1.5/nova/cmd/api_metadata.py0000664000567000056700000000333012540642543020544 0ustar jenkinsjenkins00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Starter script for Nova Metadata API.""" import sys from oslo.config import cfg from nova.conductor import rpcapi as conductor_rpcapi from nova import config from nova.objects import base as objects_base from nova.openstack.common import log as logging from nova.openstack.common.report import guru_meditation_report as gmr from nova import service from nova import utils from nova import version CONF = cfg.CONF CONF.import_opt('enabled_ssl_apis', 'nova.service') CONF.import_opt('use_local', 'nova.conductor.api', group='conductor') def main(): config.parse_args(sys.argv) logging.setup("nova") utils.monkey_patch() gmr.TextGuruMeditation.setup_autorun(version) if not CONF.conductor.use_local: objects_base.NovaObject.indirection_api = \ conductor_rpcapi.ConductorAPI() should_use_ssl = 'metadata' in CONF.enabled_ssl_apis server = service.WSGIService('metadata', use_ssl=should_use_ssl) service.serve(server, workers=server.workers) service.wait() nova-2014.1.5/nova/cmd/manage.py0000664000567000056700000015455712540642543017405 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Interactive shell based on Django: # # Copyright (c) 2005, the Lawrence Journal-World # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. 
Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Django nor the names of its contributors may be # used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ CLI interface for nova management. """ from __future__ import print_function import os import sys import netaddr from oslo.config import cfg from oslo import messaging import six from nova.api.ec2 import ec2utils from nova import availability_zones from nova.compute import flavors from nova import config from nova import context from nova import db from nova.db import migration from nova import exception from nova.openstack.common import cliutils from nova.openstack.common.db import exception as db_exc from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova import quota from nova import rpc from nova import servicegroup from nova import version CONF = cfg.CONF CONF.import_opt('network_manager', 'nova.service') CONF.import_opt('service_down_time', 'nova.service') CONF.import_opt('flat_network_bridge', 'nova.network.manager') CONF.import_opt('num_networks', 'nova.network.manager') CONF.import_opt('multi_host', 'nova.network.manager') CONF.import_opt('network_size', 'nova.network.manager') CONF.import_opt('vlan_start', 'nova.network.manager') CONF.import_opt('vpn_start', 'nova.network.manager') CONF.import_opt('default_floating_pool', 'nova.network.floating_ips') CONF.import_opt('public_interface', 'nova.network.linux_net') QUOTAS = quota.QUOTAS # Decorators for actions def args(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('args', []).insert(0, (args, kwargs)) return func return _decorator def param2id(object_id): """Helper function to convert various volume id types to internal id. args: [object_id], e.g. 'vol-0000000a' or 'volume-0000000a' or '10' """ if '-' in object_id: return ec2utils.ec2_vol_id_to_uuid(object_id) else: return object_id class VpnCommands(object): """Class for managing VPNs.""" @args('--project', dest='project_id', metavar='', help='Project name') @args('--ip', metavar='', help='IP Address') @args('--port', metavar='', help='Port') def change(self, project_id, ip, port): """Change the ip and port for a vpn. 
this will update all networks associated with a project not sure if that's the desired behavior or not, patches accepted """ # TODO(tr3buchet): perhaps this shouldn't update all networks # associated with a project in the future admin_context = context.get_admin_context() networks = db.project_get_networks(admin_context, project_id) for network in networks: db.network_update(admin_context, network['id'], {'vpn_public_address': ip, 'vpn_public_port': int(port)}) class ShellCommands(object): def bpython(self): """Runs a bpython shell. Falls back to Ipython/python shell if unavailable """ self.run('bpython') def ipython(self): """Runs an Ipython shell. Falls back to Python shell if unavailable """ self.run('ipython') def python(self): """Runs a python shell. Falls back to Python shell if unavailable """ self.run('python') @args('--shell', metavar='', help='Python shell') def run(self, shell=None): """Runs a Python interactive interpreter.""" if not shell: shell = 'bpython' if shell == 'bpython': try: import bpython bpython.embed() except ImportError: shell = 'ipython' if shell == 'ipython': try: import IPython # Explicitly pass an empty list as arguments, because # otherwise IPython would use sys.argv from this script. shell = IPython.Shell.IPShell(argv=[]) shell.mainloop() except ImportError: shell = 'python' if shell == 'python': import code try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', # because we already know 'readline' was imported successfully. readline.parse_and_bind("tab:complete") code.interact() @args('--path', metavar='', help='Script path') def script(self, path): """Runs the script from the specified path with flags set properly. arguments: path """ exec(compile(open(path).read(), path, 'exec'), locals(), globals()) def _db_error(caught_exception): print(caught_exception) print(_("The above error may show that the database has not " "been created.\nPlease create a database using " "'nova-manage db sync' before running this command.")) exit(1) class ProjectCommands(object): """Class for managing projects.""" @args('--project', dest='project_id', metavar='', help='Project name') @args('--user', dest='user_id', metavar='', help='User name') @args('--key', metavar='', help='Key') @args('--value', metavar='', help='Value') def quota(self, project_id, user_id=None, key=None, value=None): """Create, update or display quotas for project/user If no quota key is provided, the quota will be displayed. If a valid quota key is provided and it does not exist, it will be created. Otherwise, it will be updated. 
""" ctxt = context.get_admin_context() if user_id: quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id) else: user_id = None quota = QUOTAS.get_project_quotas(ctxt, project_id) # if key is None, that means we need to show the quotas instead # of updating them if key: settable_quotas = QUOTAS.get_settable_quotas(ctxt, project_id, user_id=user_id) if key in quota: minimum = settable_quotas[key]['minimum'] maximum = settable_quotas[key]['maximum'] if value.lower() == 'unlimited': value = -1 if int(value) < -1: print(_('Quota limit must be -1 or greater.')) return(2) if ((int(value) < minimum) and (maximum != -1 or (maximum == -1 and int(value) != -1))): print(_('Quota limit must be greater than %s.') % minimum) return(2) if maximum != -1 and int(value) > maximum: print(_('Quota limit must be less than %s.') % maximum) return(2) try: db.quota_create(ctxt, project_id, key, value, user_id=user_id) except exception.QuotaExists: db.quota_update(ctxt, project_id, key, value, user_id=user_id) else: print(_('%(key)s is not a valid quota key. Valid options are: ' '%(options)s.') % {'key': key, 'options': ', '.join(quota)}) return(2) print_format = "%-36s %-10s %-10s %-10s" print(print_format % ( _('Quota'), _('Limit'), _('In Use'), _('Reserved'))) # Retrieve the quota after update if user_id: quota = QUOTAS.get_user_quotas(ctxt, project_id, user_id) else: quota = QUOTAS.get_project_quotas(ctxt, project_id) for key, value in quota.iteritems(): if value['limit'] < 0 or value['limit'] is None: value['limit'] = 'unlimited' print(print_format % (key, value['limit'], value['in_use'], value['reserved'])) @args('--project', dest='project_id', metavar='', help='Project name') def scrub(self, project_id): """Deletes data associated with project.""" admin_context = context.get_admin_context() networks = db.project_get_networks(admin_context, project_id) for network in networks: db.network_disassociate(admin_context, network['id']) groups = db.security_group_get_by_project(admin_context, project_id) for group in groups: db.security_group_destroy(admin_context, group['id']) AccountCommands = ProjectCommands class FixedIpCommands(object): """Class for managing fixed ip.""" @args('--host', metavar='', help='Host') def list(self, host=None): """Lists all fixed ips (optionally by host).""" ctxt = context.get_admin_context() try: if host is None: fixed_ips = db.fixed_ip_get_all(ctxt) else: fixed_ips = db.fixed_ip_get_by_host(ctxt, host) except exception.NotFound as ex: print(_("error: %s") % ex) return(2) instances = db.instance_get_all(context.get_admin_context()) instances_by_uuid = {} for instance in instances: instances_by_uuid[instance['uuid']] = instance print("%-18s\t%-15s\t%-15s\t%s" % (_('network'), _('IP address'), _('hostname'), _('host'))) all_networks = {} try: # use network_get_all to retrieve all existing networks # this is to ensure that IPs associated with deleted networks # will not throw exceptions. for network in db.network_get_all(context.get_admin_context()): all_networks[network.id] = network except exception.NoNetworksFound: # do not have any networks, so even if there are IPs, these # IPs should have been deleted ones, so return. 
print(_('No fixed IP found.')) return has_ip = False for fixed_ip in fixed_ips: hostname = None host = None network = all_networks.get(fixed_ip['network_id']) if network: has_ip = True if fixed_ip.get('instance_uuid'): instance = instances_by_uuid.get(fixed_ip['instance_uuid']) if instance: hostname = instance['hostname'] host = instance['host'] else: print(_('WARNING: fixed ip %s allocated to missing' ' instance') % str(fixed_ip['address'])) print("%-18s\t%-15s\t%-15s\t%s" % ( network['cidr'], fixed_ip['address'], hostname, host)) if not has_ip: print(_('No fixed IP found.')) @args('--address', metavar='', help='IP address') def reserve(self, address): """Mark fixed ip as reserved arguments: address """ return self._set_reserved(address, True) @args('--address', metavar='', help='IP address') def unreserve(self, address): """Mark fixed ip as free to use arguments: address """ return self._set_reserved(address, False) def _set_reserved(self, address, reserved): ctxt = context.get_admin_context() try: fixed_ip = db.fixed_ip_get_by_address(ctxt, address) if fixed_ip is None: raise exception.NotFound('Could not find address') db.fixed_ip_update(ctxt, fixed_ip['address'], {'reserved': reserved}) except exception.NotFound as ex: print(_("error: %s") % ex) return(2) class FloatingIpCommands(object): """Class for managing floating ip.""" @staticmethod def address_to_hosts(addresses): """Iterate over hosts within an address range. If an explicit range specifier is missing, the parameter is interpreted as a specific individual address. """ try: return [netaddr.IPAddress(addresses)] except ValueError: net = netaddr.IPNetwork(addresses) if net.size < 4: reason = _("/%s should be specified as single address(es) " "not in cidr format") % net.prefixlen raise exception.InvalidInput(reason=reason) elif net.size >= 1000000: # NOTE(dripton): If we generate a million IPs and put them in # the database, the system will slow to a crawl and/or run # out of memory and crash. This is clearly a misconfiguration. reason = _("Too many IP addresses will be generated. Please " "increase /%s to reduce the number generated." ) % net.prefixlen raise exception.InvalidInput(reason=reason) else: return net.iter_hosts() @args('--ip_range', metavar='', help='IP range') @args('--pool', metavar='', help='Optional pool') @args('--interface', metavar='', help='Optional interface') def create(self, ip_range, pool=None, interface=None): """Creates floating ips for zone by range.""" admin_context = context.get_admin_context() if not pool: pool = CONF.default_floating_pool if not interface: interface = CONF.public_interface ips = ({'address': str(address), 'pool': pool, 'interface': interface} for address in self.address_to_hosts(ip_range)) try: db.floating_ip_bulk_create(admin_context, ips) except exception.FloatingIpExists as exc: # NOTE(simplylizz): Maybe logging would be better here # instead of printing, but logging isn't used here and I # don't know why. print('error: %s' % exc) return(1) @args('--ip_range', metavar='', help='IP range') def delete(self, ip_range): """Deletes floating ips by range.""" admin_context = context.get_admin_context() ips = ({'address': str(address)} for address in self.address_to_hosts(ip_range)) db.floating_ip_bulk_destroy(admin_context, ips) @args('--host', metavar='', help='Host') def list(self, host=None): """Lists all floating ips (optionally by host). 
Note: if host is given, only active floating IPs are returned """ ctxt = context.get_admin_context() try: if host is None: floating_ips = db.floating_ip_get_all(ctxt) else: floating_ips = db.floating_ip_get_all_by_host(ctxt, host) except exception.NoFloatingIpsDefined: print(_("No floating IP addresses have been defined.")) return for floating_ip in floating_ips: instance_uuid = None if floating_ip['fixed_ip_id']: fixed_ip = db.fixed_ip_get(ctxt, floating_ip['fixed_ip_id']) instance_uuid = fixed_ip['instance_uuid'] print("%s\t%s\t%s\t%s\t%s" % (floating_ip['project_id'], floating_ip['address'], instance_uuid, floating_ip['pool'], floating_ip['interface'])) class NetworkCommands(object): """Class for managing networks.""" @args('--label', metavar='