diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6eeec92..b55d922 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,8 @@
+**16/07/2021** [Version 2.4.3]
+
+ - Fix network services instantiation
+ - Fix csvToInflux exception on Ubuntu 18.04
+
 **08/06/2021** [Version 2.4.2]
 
  - Allow instantiation of slices without network services or scenario
diff --git a/Composer/composer.py b/Composer/composer.py
index 81957a3..12d03ec 100644
--- a/Composer/composer.py
+++ b/Composer/composer.py
@@ -39,14 +39,18 @@ def _messageTask(severity: str, message: str) -> TaskDefinition:
         if descriptor.Slice is not None:
             if len(descriptor.NetworkServices) != 0:
                 sliceManager = Management.SliceManager()
+                nameToLocation = sliceManager.GetVimNameToLocationMapping()
                 for ns in descriptor.NetworkServices:
-                    nsId, location = ns
+                    nsId, vimName = ns
                     try:
-                        nsInfo = NsInfo(nsId, location)
-                        requirements = sliceManager.GetNsdRequirements(nsId)
-                        if requirements is None:
-                            raise RuntimeError("Could not retrieve NSD information")
-                        nsInfo.Requirements = requirements
+                        nsdName, nsdId, nsdRequirements = sliceManager.GetNsdData(nsId)
+                        location = nameToLocation.get(vimName, None)
+                        if nsdRequirements is None:
+                            raise RuntimeError(f"Could not retrieve NSD information for '{nsId}'")
+                        elif location is None:
+                            raise RuntimeError(f"Could not retrieve location for VIM '{vimName}'")
+                        nsInfo = NsInfo(nsdName, nsdId, location)
+                        nsInfo.Requirements = nsdRequirements
                         configuration.NetworkServices.append(nsInfo)
                     except Exception as e:
                         errored = True
@@ -120,14 +124,7 @@ def composeNest(cls, baseSlice: str, scenario: str, nss: List[NsInfo]) -> Tuple[
         if baseSlice is None:
             raise RuntimeError("Cannot create NEST without a base slice value")
 
-        nsList = []
-        for ns in nss:
-            nsList.append({
-                "nsd-id": ns.Id,
-                "placement": ns.Location,
-            })
-
-        sliceDescriptor = {"base_slice_des_id": baseSlice}
+        sliceDescriptor = {"base_slice_des_ref": baseSlice}
 
         if scenario is not None:
             scenarioData = Facility.Scenarios().get(scenario, None)
@@ -136,6 +133,15 @@ def composeNest(cls, baseSlice: str, scenario: str, nss: List[NsInfo]) -> Tuple[
                 sliceDescriptor.update(scenarioData)
             # We allow having no scenario, but not having an unrecognized one
 
+        nsList = []
+        for ns in nss:
+            nsList.append({
+                "nsd-id": ns.Id,
+                "ns-name": ns.Name,
+                "placement": ns.Location,
+                "optional": False  # All network services should be deployed for the test
+            })
+
         nest = {"base_slice_descriptor": sliceDescriptor}
         if len(nsList) != 0:
             nest["service_descriptor"] = {"ns_list": nsList}
diff --git a/Data/ns_info.py b/Data/ns_info.py
index c200605..80e15f8 100644
--- a/Data/ns_info.py
+++ b/Data/ns_info.py
@@ -3,11 +3,12 @@ class NsInfo:
-    def __init__(self, nsId: str, location: str):
+    def __init__(self, nsName: str, nsId: str, location: str):
+        self.Name = nsName
         self.Id = nsId
         self.Location = location
         self.SliceId: Optional[str] = None
         self.Requirements = Metal()
 
     def __repr__(self):
-        return f'NS:{self.Id}@{self.Location} SliceId:{self.SliceId} Req:[{self.Requirements}]'
+        return f'NS:{self.Name}|{self.Id}@{self.Location} SliceId:{self.SliceId} Req:[{self.Requirements}]'
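[Note: illustration only, not part of the patch] With the changes above, composeNest now appends the network service
list after the Scenario values have been merged into the slice descriptor, and each entry carries the NSD name and an
explicit "optional" flag. Assuming a single hypothetical network service, the resulting NEST would look roughly like
this (all identifiers and values below are made up for illustration):

    nest = {
        "base_slice_descriptor": {
            "base_slice_des_ref": "embb_base_slice",  # hypothetical base slice descriptor reference
            # ...plus any values overridden by the selected Scenario
        },
        "service_descriptor": {
            "ns_list": [
                {
                    "nsd-id": "5f1b2c3d",   # hypothetical NSD database id, as returned by GetNsdData
                    "ns-name": "vCache",    # hypothetical NSD name, as returned by GetNsdData
                    "placement": "EdgeDC",  # location resolved through GetVimNameToLocationMapping
                    "optional": False,      # all network services must be deployed for the test
                }
            ]
        },
    }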
diff --git a/Helper/influx.py b/Helper/influx.py
index fd7c3f5..063e9b6 100644
--- a/Helper/influx.py
+++ b/Helper/influx.py
@@ -153,7 +153,7 @@ def _convert(value: str) -> Union[int, float, bool, str]:
             timestampValue = float(row.pop(timestampKey))
             try:
                 timestamp = datetime.fromtimestamp(timestampValue, tz=timezone.utc)
-            except OSError:
+            except (OSError, ValueError):
                 # value outside of bounds, maybe because it's specified in milliseconds instead of seconds
                 timestamp = datetime.fromtimestamp(timestampValue/1000.0, tz=timezone.utc)
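[Note: illustration only, not part of the patch] The widened except clause above covers platforms where an
out-of-range epoch value raises ValueError rather than OSError, which is presumably what the "csvToInflux exception
on Ubuntu 18.04" changelog entry refers to. A minimal, self-contained sketch of the same fallback, using a
hypothetical helper name and a hypothetical millisecond timestamp:

    from datetime import datetime, timezone

    def toUtcDatetime(timestampValue: float) -> datetime:
        # Hypothetical helper mirroring the conversion logic patched above
        try:
            return datetime.fromtimestamp(timestampValue, tz=timezone.utc)
        except (OSError, ValueError):
            # Value outside of bounds, maybe because it's specified in milliseconds instead of seconds
            return datetime.fromtimestamp(timestampValue / 1000.0, tz=timezone.utc)

    print(toUtcDatetime(1626453296000.0))  # falls back to the millisecond interpretation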
diff --git a/Interfaces/management.py b/Interfaces/management.py
index f9a8888..cdeb2c7 100644
--- a/Interfaces/management.py
+++ b/Interfaces/management.py
@@ -35,7 +35,7 @@ def HasResources(cls, owner: 'ExecutorBase', localResources: List[str],
 
         for vim, required in totalRequired.items():
             if vim not in vimResources.keys():
-                Log.E(f"Unknown VIM {vim}. Execution unfeasible.")
+                Log.E(f"Unknown VIM '{vim}'. Execution unfeasible.")
                 return False, False
             current = vimResources[vim]
             if (required.Cpu > current.TotalCpu or
@@ -111,8 +111,12 @@ def _getVimResources(vimData):
         data = self.ResponseToJson(response)
         try:
             for vim in data["VIMs"]:
+                # Index by name and location
                 name = vim["name"]
-                res[name] = _getVimResources(vim)
+                location = vim["location"]
+                resources = _getVimResources(vim)
+                res[name] = resources
+                res[location] = resources
         except Exception as e:
             Log.E(f"Exception while retrieving VIM resources: {e}")
             Log.D(f"Payload: {data}")
@@ -123,13 +127,16 @@ def GetNsdInfo(self, nsdName: str = None) -> Dict:
         response = self.HttpGet(url, {"Accept": "application/json"})
         data = self.ResponseToJson(response)
 
-        allNsds = {}
+        allNsds = {}
         for nsd in data:
+            # Index by name and database id
             allNsds[nsd['nsd-name']] = nsd
+            allNsds[nsd['nsd-id']] = nsd
 
         return allNsds if nsdName is None else allNsds[nsdName]
 
-    def GetNsdRequirements(self, nsd: str) -> Optional[Metal]:
+    def GetNsdData(self, nsd: str) -> Tuple[Optional[str], Optional[str], Optional[Metal]]:
+        """Returns (nsd_name, nsd_id, requirements (as Metal))"""
         try:
             data = self.GetNsdInfo(nsd)
             if isinstance(data, list):
@@ -138,12 +145,13 @@ def GetNsdRequirements(self, nsd: str) -> Optional[Metal]:
                 else:
                     raise RuntimeError("Received an empty list")
             try:
                 flavor = data["flavor"]
-                return Metal(cpu=flavor["vcpu-count"], ram=flavor["memory-mb"], disk=flavor["storage-gb"])
+                return data['nsd-name'], data['nsd-id'], Metal(cpu=flavor["vcpu-count"],
+                                                               ram=flavor["memory-mb"], disk=flavor["storage-gb"])
             except KeyError as k:
                 raise RuntimeError(f"'{k}' key not present in data")
         except Exception as e:
             Log.E(f"Exception while retrieving NSD information: {e}")
-            return None
+            return None, None, None
 
     def GetBaseSliceDescriptors(self) -> List[str]:
         try:
@@ -160,4 +168,21 @@ def GetBaseSliceDescriptors(self) -> List[str]:
             Log.E(f"Exception while retrieving Base Slice Descriptors: {e}")
             return []
 
+    def GetVimNameToLocationMapping(self) -> Dict[str, str]:
+        try:
+            response = self.HttpGet(f"{self.api_url}/vim", {"Accept": "application/json"})
+            data = self.ResponseToJson(response)
+            vimIds = [vim['_id'] for vim in data]
+        except Exception as e:
+            Log.E(f"Exception while retrieving VIM ids: {e}")
+            return {}
+        res = {}
+        for vimId in vimIds:
+            try:
+                response = self.HttpGet(f"{self.api_url}/vim/{vimId}", {"Accept": "application/json"})
+                data = self.ResponseToJson(response)
+                res[data["name"]] = data["location"]
+            except Exception as e:
+                Log.W(f"Exception while retrieving information for VIM '{vimId}': {e}")
+        return res
diff --git a/README.md b/README.md
index 2f6b29b..4eb710d 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,7 @@ certain Visual C++ redistributables to be installed, and the following packages
 distributions: `gcc python3.7 python3.7-venv python3.7-dev`. Fixes for specific issues are usually easy to find on
 Internet.
 
-This repository includes two sets of scripts for use on Linux (`.sh`) and Windows (`.ps1`) machines. In general
+This repository includes two sets of scripts for use on Linux (`.sh`) and Windows (`.ps1`) machines. In general,
 these scripts should be able to perform most of the actions required for instantiating the ELCM, however, depending
 on the deployment environment some actions may fail or require additional tweaking. The contents of the scripts can
 be used as a guide for manual installation, and a description of the actions performed by the scripts is included below
@@ -90,21 +90,21 @@ The ELCM instance can be configured by editing the `config.yml` file. The values
     set to True only if the TAP executions hang at the end frequently due to adb not closing.
   > These values will be used by the `Run.TapExecute` task.
 * Grafana:
-  * Enabled
-  * Host
-  * Port
+  * Enabled: If set to False the settings below will be ignored
+  * Host: Location of the machine where the Grafana instance is running
+  * Port: Port where the Grafana API is listening
   * Bearer: Grafana API key without the 'Bearer ' prefix
   * ReportGenerator: URL where the `Grafana reporter` instance can be reached, if any
   > These values will be used when creating a dashboard for the results generated by an experiment execution.
 * SliceManager:
-  * Host
-  * Port
+  * Host: Location of the machine where the Katana Slice Manager is running
+  * Port: Port where the Slice Manager is listening
   > These values will be used to communicate with the Katana Slice Manager when deploying/decommissioning slices and when
   > using the `Run.SingleSliceCreationTime` and `Run.SliceCreationTime` tasks.
 * InfluxDb:
   * Enabled: If set to False the settings below will be ignored
-  * Host
-  * Port
+  * Host: Location of the machine where the InfluxDb instance is running
+  * Port: Port where InfluxDB is listening
   * User: InfluxDb instance user
   * Password: InfluxDb user password
   * Database: InfluxDb instance database
@@ -346,7 +346,7 @@ The expected formats on a result name are "` [[]]`" and
 
 It is possible to define a set of available local resources. These resources can be specified as requirements for the
 execution of each kind of task inside a test case.
-Resources are defined by including a YAML file in the `Resources`. The contents of these files are as follows:
+Resources are defined by including a YAML file in the `Resources` folder. The contents of these files are as follows:
 - `Id`: Resource ID. This Id must be unique to the facility and will be used to identify the resource on the test cases.
 - `Name`: Name of the resource (visible on the ELCM dashboard).
 - `Icon`: Resource icon (visible on the ELCM dashboard). Uses Font Awesome (only free icons)
@@ -366,7 +366,7 @@ dictionary that contains the collection of values that are to be customized by t
 
 When the experiment requests the deployment of a Network Slice the ELCM will create a NEST description. The NEST
 created by the ELCM has 3 main parts:
- - A reference to a base slice descriptor, that must be available in the Katana Slice Manager.
+ - A reference to a base slice descriptor, which must be available in the Katana Slice Manager.
  - A collection of values that are to be overridden from the base slice descriptor, taken from the selected Scenario.
  - A possibly empty list of references to Network Services that are to be included as part of the Network Slice.
@@ -586,7 +586,7 @@ The general workflow during a distributed experiment is as follows:
 - The Dispatcher of one of the platforms (the `Main` platform) receives a distributed experiment execution request,
   either from the Portal or through the Open APIs.
 - The Dispatcher performs the initial coordination, contacting with the ELCM of its own platform and the Dispatcher
-  of the remote platform (the `Secondary`platform).
+  of the remote platform (the `Secondary` platform).
 - Once the initial coordination is completed, the ELCM on both sides communicate directly for the rest of the
   experiment execution.
 - Each side performs the execution of their tasks as normal, unless they reach a point where they must coordinate: