Update Bobcat Support #23

Open · wants to merge 7 commits into base: master
115 changes: 112 additions & 3 deletions defs/components.json
@@ -75,6 +75,18 @@
}
},
"bobcat": {
"23.3.1.1.0.0": {
"comment": "StorPool fixes for Bobcat (iSCSI multipath)",
"files": {
"volume/driver.py": {
"sha256": "5e8f5128803180fc5b1ccad2791bebcf847f0bde542eb58597b2e8d12d48ab15"
},
"volume/drivers/storpool.py": {
"sha256": "85033648d36605fbba2b445bb94d0acaae3763049802e720e5389b451b237678"
}
},
"outdated": false
},
"23.0.0.8.0.0": {
"comment": "StorPool fixes for Bobcat (iSCSI multipath)",
"files": {
@@ -85,7 +97,19 @@
"sha256": "dd9ef3a599089a8c6b6e0e19a7c017556504c1e47c07e4b53d3d8a4492757d68"
}
},
"outdated": false
"outdated": true
},
"23.3.1.0.0.0": {
"comment": "Kolla Bobcat snapshot 2024-12-20",
"files": {
"volume/driver.py": {
"sha256": "5e8f5128803180fc5b1ccad2791bebcf847f0bde542eb58597b2e8d12d48ab15"
},
"volume/drivers/storpool.py": {
"sha256": "658572416f99ac316860df92b57466dfa99612bf4dd64ee1b4fe11d52e3eae4e"
}
},
"outdated": true
},
"23.0.0.0.0.0": {
"comment": "upstream Bobcat 23.0.0",
@@ -638,6 +662,18 @@
],
"branches": {
"bobcat": {
"27.1.1.0.0.0": {
"comment": "Kolla Glance snapshot 2024-12-20",
"files": {
"async_/__init__.py": {
"sha256": "6f3869b89401197d1ddc1df53ec4fc6f55546fb4d88ec9eaf837e9fdc00cebc5"
},
"api/v2/images.py": {
"sha256": "539e664ecb0aedfe568738b84fe508a9d159093285a8c0f843b31bd57d3b2b11"
}
},
"outdated": false
},
"26.0.0.0.0.90": {
"comment": "upstream Glance 27.0.0.0rc2",
"files": {
@@ -770,6 +806,30 @@
}
},
"bobcat": {
"28.0.0.2.0.0": {
"comment": "StorPool fixes for Bobcat 28.0.0 (iothread)",
"files": {
"virt/libvirt/driver.py": {
"sha256": "0c4cde6ba7458b4df838bde62d4dbe254552bf1647bc4c640dc4bf319cba759f"
},
"virt/libvirt/config.py": {
"sha256": "b449939fb19c91384f90c6aa5480446fe34d0104a1ce051b7669b0b14db7ccd7"
},
"conf/libvirt.py": {
"sha256": "6a48a179a8c45fcb568bb3785af2f88fd1e22c5f3f43c1d87c1f54058ce82e9f"
},
"tests/fixtures/libvirt_data.py": {
"sha256": "97c441c46658d586cba7047731bbec063377403f20294acf5984e3781695c1c9"
},
"tests/unit/virt/libvirt/test_config.py": {
"sha256": "7a49354ea661a7ef3f1f6f1d64ebaa2140141088df9eca0e8ba4ffe056a58611"
},
"virt/libvirt/volume/volume.py": {
"sha256": "27e7f62d25c29d04ea6f43abe473aec7db729b9defc5f800c6b94c2472b22200"
}
},
"outdated": false
},
"28.0.0.1.0.0": {
"comment": "StorPool fixes for Bobcat 28.0.0 (iothread)",
"files": {
@@ -792,7 +852,31 @@
"sha256": "27e7f62d25c29d04ea6f43abe473aec7db729b9defc5f800c6b94c2472b22200"
}
},
"outdated": false
"outdated": true
},
"28.3.1.0.0.0": {
"comment": "Kolla Bobcat snapshot",
"files": {
"virt/libvirt/driver.py": {
"sha256": "ba3874a230c593e9d473a6f374f802bb03d71fbd82d227a04ea5db3be942b511"
},
"virt/libvirt/config.py": {
"sha256": "1432cf104dc22aec4a2785877c458621ddab40392b2d260aea38d114bac4c5f0"
},
"conf/libvirt.py": {
"sha256": "d670cf7805e73c68a137bdc525a09b2fbce1ce3bd56f931b45ee9c02f4c618b4"
},
"tests/fixtures/libvirt_data.py": {
"sha256": "b9d1d763825343efb01d158defd1630ac4569923951b1b140a633421bc313fbb"
},
"tests/unit/virt/libvirt/test_config.py": {
"sha256": "e7572541968919529aac1fb2f6c90a46a17e972a800f360d5034650c7ef2bdc2"
},
"virt/libvirt/volume/volume.py": {
"sha256": "da9268880d3dc8d7146cea9866fb30855cb528d4179e11b7fa2cf6b7b85acab3"
}
},
"outdated": true
},
"28.0.1.0.0.2": {
"comment": "upstream Bobcat snapshot 2024-04-23",
@@ -1527,6 +1611,18 @@
}
},
"bobcat": {
"6.4.2.1.0.0": {
"comment": "StorPool updates for Bobcat 6.4.2 (attach-globalid, raise-on-spopenstack-issues)",
"files": {
"initiator/connectors/fibre_channel.py": {
"sha256": "5847f578930125b4878bc48a71122c6f8b0e89b4e5320d2b9fbce6a74f18544c"
},
"initiator/connectors/storpool.py": {
"sha256": "46f49ca9c8470ce5782a13714ed52a143e600a6550f0b7c7fed7813873f62e5c"
}
},
"outdated": false
},
"6.4.0.2.0.0": {
"comment": "StorPool updates for Bobcat 6.4.0 (attach-globalid, raise-on-spopenstack-issues)",
"files": {
@@ -1537,7 +1633,7 @@
"sha256": "b85eacdb358603ecd976190d96a059d7c5f444803cc4e88d5019b5913dd6e6b5"
}
},
"outdated": false
"outdated": true
},
"6.4.0.1.0.0": {
"comment": "StorPool updates for Bobcat 6.4.0",
@@ -1551,6 +1647,19 @@
},
"outdated": true
},
"6.4.2.0.0.0": {
"comment": "Kolla Bobcat 6.4.2 snapshot 2024-12-20",

"files": {
"initiator/connectors/fibre_channel.py": {
"sha256": "5847f578930125b4878bc48a71122c6f8b0e89b4e5320d2b9fbce6a74f18544c"
},
"initiator/connectors/storpool.py": {
"sha256": "1f24d4f7033c2585678341fb0b114d89e12af6070e758f111c9a505610c3d41c"
}
},
"outdated": true
},
"6.4.0.0.0.0": {
"comment": "upstream Bobcat 6.4.0",
"files": {
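Note: each entry in defs/components.json maps a branch and version string to the SHA-256 checksums of the tracked driver files, plus an "outdated" flag; this update marks the older Bobcat entries outdated and adds the new snapshots as current. A minimal sketch of how such a registry can be consumed to identify an installed file tree (the helper name `identify_version` and the surrounding layout are assumptions for illustration, not part of this change):

```python
import hashlib
import pathlib


def identify_version(branch_defs, install_root):
    """Return the known version whose recorded checksums all match, or None.

    branch_defs is one branch mapping from defs/components.json, e.g. the
    "bobcat" object shown above: {version: {"comment", "files", "outdated"}}.
    """
    for version, info in branch_defs.items():
        for rel_path, meta in info["files"].items():
            target = pathlib.Path(install_root) / rel_path
            # Any missing file or checksum mismatch rules this version out.
            if not target.is_file():
                break
            if hashlib.sha256(target.read_bytes()).hexdigest() != meta["sha256"]:
                break
        else:
            return version
    return None
```

A caller would then treat a match with `"outdated": true` as a known version with a patch available, and a match with `"outdated": false` as already up to date.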
77 changes: 34 additions & 43 deletions drivers/cinder/openstack/bobcat/storpool.py
@@ -1,4 +1,4 @@
# Copyright (c) 2014 - 2022 StorPool
# Copyright (c) 2014 - 2019 StorPool
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -25,7 +25,6 @@
from oslo_utils import netutils
from oslo_utils import units
from oslo_utils import uuidutils
import six

from cinder.common import constants
from cinder import context
@@ -59,7 +58,8 @@
'An empty string (the default) makes the driver export '
'all volumes using the StorPool native network protocol. '
'The value "*" makes the driver export all volumes using '
'iSCSI. '
'iSCSI (see the Cinder StorPool driver documentation for '
'how this option and ``iscsi_cinder_volume`` interact). '
'Any other value leads to an experimental, not fully '
'supported configuration and is interpreted as '
'a whitespace-separated list of patterns for IQNs for '
@@ -143,18 +143,11 @@ class StorPoolDriver(driver.VolumeDriver):
1.2.2 - Reintroduce the driver into OpenStack Queens,
add ignore_errors to the internal _detach_volume() method
1.2.3 - Advertise some more driver capabilities.
2.0.0 - Drop _attach_volume() and _detach_volume(), our os-brick
connector will handle this.
- Detach temporary snapshots and volumes after copying data
to or from Glance images.
- Drop backup_volume()
- Avoid data duplication in create_cloned_volume()
- Implement clone_image()
- Implement revert_to_snapshot().
- Add support for exporting volumes via iSCSI
2.0.0 - Implement revert_to_snapshot().
2.1.0 - Add iSCSI export support.
"""

VERSION = '2.0.0'
VERSION = '2.1.0'
CI_WIKI_NAME = 'StorPool_distributed_storage_CI'

def __init__(self, *args, **kwargs):
@@ -164,14 +157,14 @@ def __init__(self, *args, **kwargs):
self._ourId = None
self._ourIdInt = None
self._attach = None
self._use_iscsi = None
self._use_iscsi = False

@staticmethod
def get_driver_options():
return storpool_opts

def _backendException(self, e):
return exception.VolumeBackendAPIException(data=six.text_type(e))
return exception.VolumeBackendAPIException(data=str(e))

def _template_from_volume(self, volume):
default = self.configuration.storpool_template
@@ -625,9 +618,9 @@ def create_cloned_volume(self, volume, src_vref):
src_template = self._template_from_volume(src_volume)

template = self._template_from_volume(volume)
LOG.debug('clone volume id %(vol_id)s template %(template)s', {
'vol_id': repr(volume['id']),
'template': repr(template),
LOG.debug('clone volume id %(vol_id)r template %(template)r', {
'vol_id': volume['id'],
'template': template,
})
if template == src_template:
LOG.info('Using baseOn to clone a volume into the same template')
@@ -777,7 +770,7 @@ def _update_volume_stats(self):
'total_capacity_gb': total / units.Gi,
'free_capacity_gb': free / units.Gi,
'reserved_percentage': 0,
'multiattach': not self._use_iscsi,
'multiattach': self._use_iscsi,
'QoS_support': False,
'thick_provisioning_support': False,
'thin_provisioning_support': True,
@@ -829,26 +822,24 @@ def retype(self, context, volume, new_type, diff, host):
templ = self.configuration.storpool_template
repl = self.configuration.storpool_replication
if diff['extra_specs']:
for (k, v) in diff['extra_specs'].items():
if k == 'volume_backend_name':
if v[0] != v[1]:
# Retype of a volume backend not supported yet,
# the volume needs to be migrated.
return False
elif k == 'storpool_template':
if v[0] != v[1]:
if v[1] is not None:
update['template'] = v[1]
elif templ is not None:
update['template'] = templ
else:
update['replication'] = repl
else:
# We ignore any extra specs that we do not know about.
# Let's leave it to Cinder's scheduler to not even
# get this far if there is any serious mismatch between
# the volume types.
pass
# Check for the StorPool extra specs. We intentionally ignore any
# other extra_specs because the cinder scheduler should not even
# call us if there's a serious mismatch between the volume types.
if diff['extra_specs'].get('volume_backend_name'):
v = diff['extra_specs'].get('volume_backend_name')
if v[0] != v[1]:
# Retype of a volume backend not supported yet,
# the volume needs to be migrated.
return False
if diff['extra_specs'].get('storpool_template'):
v = diff['extra_specs'].get('storpool_template')
if v[0] != v[1]:
if v[1] is not None:
update['template'] = v[1]
elif templ is not None:
update['template'] = templ
else:
update['replication'] = repl

if update:
name = self._attach.volumeName(volume['id'])
@@ -881,21 +872,21 @@ def update_migrated_volume(self, context, volume, new_volume,
'the StorPool cluster.',
{'oid': orig_id, 'tid': temp_id})
int_name = temp_name + '--temp--mig'
LOG.debug('Trying to swap the volume names, intermediate "%(int)s"',
LOG.debug('Trying to swap volume names, intermediate "%(int)s"',
{'int': int_name})
try:
LOG.debug('- rename "%(orig)s" to "%(int)s"',
{'orig': orig_name, 'int': int_name})
{'orig': orig_name, 'int': int_name})
self._attach.api().volumeUpdate(orig_name,
{'rename': int_name})

LOG.debug('- rename "%(temp)s" to "%(orig)s"',
{'temp': temp_name, 'orig': orig_name})
{'temp': temp_name, 'orig': orig_name})
self._attach.api().volumeUpdate(temp_name,
{'rename': orig_name})

LOG.debug('- rename "%(int)s" to "%(temp)s"',
{'int': int_name, 'temp': temp_name})
{'int': int_name, 'temp': temp_name})
self._attach.api().volumeUpdate(int_name,
{'rename': temp_name})
return {'_name_id': None}
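Note: the expanded help text for the `iscsi_export_to` option describes three modes: an empty string keeps the StorPool native protocol for all volumes, "*" exports everything via iSCSI, and any other value is a whitespace-separated list of IQN patterns (experimental). A short sketch of that documented decision logic, assuming shell-style pattern matching; this illustrates the option's semantics and is not the driver's actual implementation:

```python
import fnmatch


def export_via_iscsi(iscsi_export_to, initiator_iqn):
    """Decide whether a volume should be exported via iSCSI."""
    value = iscsi_export_to.strip()
    if not value:
        # Default: all volumes use the StorPool native protocol.
        return False
    if value == '*':
        # Export all volumes via iSCSI.
        return True
    # Experimental: whitespace-separated patterns matched against the
    # initiator's IQN.
    return any(fnmatch.fnmatch(initiator_iqn, pattern)
               for pattern in value.split())
```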
39 changes: 27 additions & 12 deletions drivers/nova/openstack/bobcat/driver.py
@@ -734,12 +734,31 @@ def _handle_conn_event(self, enabled, reason):
{'enabled': enabled, 'reason': reason})
self._set_host_enabled(enabled, reason)

def _init_host_topology(self):
"""To work around a bug in libvirt that reports offline CPUs as always
being on socket 0 regardless of their real socket, power up all
dedicated CPUs (the only ones whose socket we actually care about),
then call get_capabilities() to initialize the topology with the
correct socket values. get_capabilities()'s implementation will reuse
these initial socket values, and avoid clobbering them with 0 for
offline CPUs.
"""
cpus = hardware.get_cpu_dedicated_set()
if cpus:
self.cpu_api.power_up(cpus)
self._host.get_capabilities()

def init_host(self, host):
self._host.initialize()

self._update_host_specific_capabilities()

# NOTE(artom) Do this first to make sure our first call to
# get_capabilities() happens with all dedicated CPUs online and caches
# their correct socket ID. Unused dedicated CPUs will be powered down
# further down in this method.
self._check_cpu_set_configuration()
self._init_host_topology()

self._update_host_specific_capabilities()

self._do_quality_warnings()

@@ -1623,12 +1642,12 @@ def cleanup(self, context, instance, network_info, block_device_info=None,
cleanup_instance_dir = True
cleanup_instance_disks = True
else:
# NOTE(mdbooth): I think the theory here was that if this is a
# migration with shared block storage then we need to delete the
# instance directory because that's not shared. I'm pretty sure
# this is wrong.
# NOTE(mheler): For shared block storage we only need to clean up
# the instance directory when it's not on a shared path.
if migrate_data and 'is_shared_block_storage' in migrate_data:
cleanup_instance_dir = migrate_data.is_shared_block_storage
cleanup_instance_dir = (
migrate_data.is_shared_block_storage and
not migrate_data.is_shared_instance_path)

# NOTE(lyarwood): The following workaround allows operators to
# ensure that non-shared instance directories are removed after an
@@ -2995,11 +3014,7 @@ def _create_snapshot_metadata(self, image_meta, instance,
if instance.os_type:
metadata['properties']['os_type'] = instance.os_type

# NOTE(vish): glance forces ami disk format to be ami
if image_meta.disk_format == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
metadata['disk_format'] = img_fmt

if image_meta.obj_attr_is_set("container_format"):
metadata['container_format'] = image_meta.container_format
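Note: the cleanup() hunk narrows when the instance directory is removed after a live migration: with shared block storage the disks already live on the destination, but the directory itself should only be deleted when the instance path is not on shared storage. A condensed sketch of the corrected predicate (both migrate_data fields appear in the hunk above; the standalone helper is illustrative only):

```python
def needs_instance_dir_cleanup(migrate_data):
    """Mirror the updated cleanup() logic for the instance directory."""
    shared_block = getattr(migrate_data, 'is_shared_block_storage', None)
    shared_path = getattr(migrate_data, 'is_shared_instance_path', False)
    if shared_block is None:
        # No live-migration data: this branch of cleanup() does not apply.
        return False
    # Shared block storage means the destination already has the disks, so
    # the source directory is stale; but if the instance path itself is
    # shared, deleting it would destroy the directory still in use.
    return shared_block and not shared_path
```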