From 7a50e2d4d530ae505c74320b9795d805232a7588 Mon Sep 17 00:00:00 2001 From: "Ganesh B. Nalawade" Date: Thu, 24 May 2018 13:35:53 +0530 Subject: [PATCH 1/8] Add yang2spec lookup plugin * Generate a rules spec and json configuration schema from the input yang document. * The rules spec will be used to validate the json configuration to check if it adheres to the respective yang model. * The json configuration schema can be used as reference to build json configuration which conform with respective yang model. --- lookup_plugins/yang2spec.py | 1556 +++++++++++++++++++++++++++++++++++ 1 file changed, 1556 insertions(+) create mode 100644 lookup_plugins/yang2spec.py diff --git a/lookup_plugins/yang2spec.py b/lookup_plugins/yang2spec.py new file mode 100644 index 0000000..91122fe --- /dev/null +++ b/lookup_plugins/yang2spec.py @@ -0,0 +1,1556 @@ +# +# Copyright 2018 Red Hat | Ansible +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +from __future__ import (absolute_import, division, print_function) + +__metaclass__ = type + +DOCUMENTATION = """ + lookup: yang2spec + version_added: "2.5" + short_description: This plugin reads the content of given yang document and transforms it to a + rules spec and configuration schema. + description: + - This plugin parses yang document and transforms it into a spec which provides a set of rules + This rules spec can be used to validate the input configuration to check if it adheres + with respective yang model. It also outputs the configuration schema in json format and can + be used as reference to build input json configuration. + options: + _terms: + description: The path points to the location of the top level yang module which + is to be transformed into to Ansible spec. + required: True + search_path: + description: + - The path is a colon (:) separated list of directories to search for imported yang modules + in the yang file mentioned in C(path) option. If the value is not given it will search in + the current directory. + required: false +""" + +EXAMPLES = """ +- name: Get interface yang spec + set_fact: + interfaces_spec: "{{ lookup('yang2spec', 'openconfig/public/release/models/interfaces/openconfig-interfaces.yang', + search_path='openconfig/public/release/models:pyang/modules/') }}" +""" + +RETURN = """ + _list: + description: + - It returns the rules spec and json configuration schema. 
+ type: complex + contains: + spec: + description: The rules spec in json format generated from given yang document + returned: success + type: dict + sample: | + { + "options": { + "interfaces": { + "suboptions": { + "interface": { + "suboptions": { + "config": { + "suboptions": { + "description": { + "type": "str" + }, + "enabled": { + "default": "true", + "type": "boolean" + }, + "loopback_mode": { + "default": "false", + "type": "boolean" + }, + "mtu": { + "restriction": { + "int_size": 16, + "max": 65535, + "min": 0 + }, + "type": "int" + }, + "name": { + "type": "str" + }, + } + }, + "suboptions_elements": "dict", + "suboptions_type": "list" + } + } + } + } + } + } + config_schema: + description: The json configuration schema generated from yang document + returned: success + type: dict + sample: | + { + "interfaces": { + "interface": { + "config": [ + { + "description": null, + "enabled": true, + "loopback_mode": false, + "mtu": null, + "name": null, + "type": null + } + ], + } + } + } +""" + +import os +import sys +import copy +import six +import shutil +import json +import imp + +from copy import deepcopy + +from ansible import constants as C +from ansible.plugins.lookup import LookupBase +from ansible.module_utils.six import StringIO, iteritems, string_types +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.utils.path import unfrackpath, makedirs_safe +from ansible.errors import AnsibleError + +try: + from __main__ import display +except ImportError: + from ansible.utils.display import Display + + display = Display() + +try: + from pyang import plugin + from pyang import statements + from pyang import util +except ImportError: + raise AnsibleError("pyang is not installed") + + +# The code to build the dependency tree from the instantiated tree that pyang has +# already parsed is referred from +# https://github.com/robshakir/pyangbind/blob/master/pyangbind/plugin/pybind.py +# and it is customised to fit in Ansible infra. + +# After the dependency tree is build it is parsed to emit the Ansible spec and configuration schema +# in json format. +# Due to current structure of pybind plugin the referred code cannot be reused as it is +# or imported from pyangbind library. + +def warning(msg): + if C.ACTION_WARNINGS: + display.warning(msg) + + +# Python3 support +if six.PY3: + long = int + unicode = str + + +# YANG is quite flexible in terms of what it allows as input to a boolean +# value, this map is used to provide a mapping of these values to the python +# True and False boolean instances. +class_bool_map = { + 'false': False, + 'False': False, + 'true': True, + 'True': True, +} + +class_map = { + # this map is dynamically built upon but defines how we take + # a YANG type and translate it into a native Python class + # along with other attributes that are required for this mapping. + # + # key: the name of the YANG type + # native_type: the Python class that is used to support this + # YANG type natively. + # map (optional): a map to take input values and translate them + # into valid values of the type. + # base_type: types that cannot be supported natively, such + # as enumeration, or a string with a restriction placed on it) + # quote_arg (opt): whether the argument needs to be quoted (e.g., str("hello")) in + # be quoted (e.g., str("hello")) in the code that is + # output. 
+ # parent_type (opt): for "derived" types, then we store what the enclosed + # type is such that we can create instances where + # required e.g., a restricted string will have a + # parent_type of a string. this can be a list if the + # type is a union. + # restriction ...: where the type is a restricted type, then the + # (optional) class_map dict entry can store more information about + # the type of restriction. + # Other types may add their own types to this dictionary that have + # meaning only for themselves. For example, a ReferenceType can add the + # path it references, and whether the require-instance keyword was set + # or not. + # + 'boolean': { + "native_type": "boolean", + "base_type": True, + "quote_arg": True, + }, + 'binary': { + "native_type": "bitarray", + "base_type": True, + "quote_arg": True + }, + 'uint8': { + "native_type": "int", + "base_type": True, + "restriction_dict": {'min': 0, 'max': 255, 'int_size': 8} + }, + 'uint16': { + "native_type": "int", + "base_type": True, + "restriction_dict": {'min': 0, 'max': 65535, 'int_size': 16} + }, + 'uint32': { + "native_type": "int", + "base_type": True, + "restriction_dict": {'min': 0, 'max': 4294967295, 'int_size': 32} + }, + 'uint64': { + "native_type": "long", + "base_type": True, + "restriction_dict": {'min': 0, 'max': 18446744073709551615, 'int_size': 64} + }, + 'string': { + "native_type": "str", + "base_type": True, + "quote_arg": True + }, + 'decimal64': { + "native_type": "float", + "base_type": True, + }, + 'empty': { + "native_type": "empty", + "map": class_bool_map, + "base_type": True, + "quote_arg": True, + }, + 'int8': { + "native_type": "int", + "base_type": True, + "restriction_dict": {'min': -128, 'max': 127, 'int_size': 8} + }, + 'int16': { + "native_type": "int", + "base_type": True, + "restriction_dict": {'min': -32768, 'max': 32767, 'int_size': 16} + }, + 'int32': { + "native_type": "int", + "base_type": True, + "restriction_dict": {'min': -2147483648, 'max': 2147483647, 'int_size': 32} + }, + 'int64': { + "native_type": "long", + "base_type": True, + "restriction_dict": {'min': -9223372036854775808, 'max': 9223372036854775807, 'int_size': 64} + }, +} + +# We have a set of types which support "range" statements in RFC6020. This +# list determins types that should be allowed to have a "range" argument. +INT_RANGE_TYPES = ["uint8", "uint16", "uint32", "uint64", + "int8", "int16", "int32", "int64"] + +# The types that are built-in to YANG +YANG_BUILTIN_TYPES = list(class_map.keys()) + \ + ["container", "list", "rpc", "notification", "leafref"] + +YANG2SPEC_PLUGIN_PATH = "~/.ansible/tmp/yang2spec" + +# Words that could turn up in YANG definition files that are actually +# reserved names in Python, such as being builtin types. This list is +# not complete, but will probably continue to grow. 
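# A minimal illustration of how this list is used (the node names here are
# hypothetical): safe_name(), defined further below, first rewrites "-" and
# "." to "_" and then appends a trailing "_" when the result collides with
# one of these reserved words, so
#
#   safe_name("list")            -> "list_"
#   safe_name("max-frame-size")  -> "max_frame_size"
#
# which is why keys such as "loopback_mode" appear with underscores in the
# spec sample shown in RETURN above.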
+reserved_name = ["list", "str", "int", "global", "decimal", "float", + "as", "if", "else", "elif", "map", "set", "class", + "from", "import", "pass", "return", "is", "exec", + "pop", "insert", "remove", "add", "delete", "local", + "get", "default", "yang_name", "def", "print", "del", + "break", "continue", "raise", "in", "assert", "while", + "for", "try", "finally", "with", "except", "lambda", + "or", "and", "not", "yield", "property", "min", "max"] + +ansible_spec_header = {} +ansible_spec_option = {"options": {}} +ansible_spec_return = {"return": {}} + + +class LookupModule(LookupBase): + VALID_FILE_EXTENSIONS = ('.yang',) + + def run(self, terms, variables=None, **kwargs): + + try: + yang_file = terms[0] + except IndexError: + raise AnsibleError('the yang file must be specified') + + if not os.path.isfile(yang_file): + raise AnsibleError('%s invalid file path' % yang_file) + + search_path = kwargs.pop('search_path', '') + + for path in search_path.split(':'): + if path is not '' and not os.path.isdir(path): + raise AnsibleError('%s is invalid directory path' % path) + + pyang_exec_path = find_file_in_path('pyang') + + pyang_exec = imp.load_source('pyang', pyang_exec_path) + + saved_arg = deepcopy(sys.argv) + + saved_stdout = sys.stdout + sys.stdout = StringIO() + + plugin_file_src = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'yang2spec.py') + + plugindir = unfrackpath(YANG2SPEC_PLUGIN_PATH) + makedirs_safe(plugindir) + + shutil.copy(plugin_file_src, plugindir) + + # fill in the sys args before invoking pyang + sys.argv = [pyang_exec_path, '--plugindir', plugindir, '-f', 'yang2spec', yang_file, '-p', search_path] + + res = list() + try: + pyang_exec.run() + except SystemExit: + pass + + res.append(sys.stdout.getvalue()) + + sys.argv = saved_arg + sys.stdout = saved_stdout + + shutil.rmtree(plugindir, ignore_errors=True) + return res + + +class Yang2Spec(plugin.PyangPlugin): + def add_output_format(self, fmts): + # Add the 'pybind' output format to pyang. + self.multiple_modules = True + fmts['yang2spec'] = self + + def emit(self, ctx, modules, fd): + # When called, call the build_pyangbind function. + build_spec(ctx, modules, fd) + emit_ansible_spec(fd) + + def add_opts(self, optparser): + # Add yang2spec specific operations to pyang. + pass + + +def find_file_in_path(filename): + # Check $PATH first, followed by same directory as sys.argv[0] + paths = os.environ['PATH'].split(os.pathsep) + [os.path.dirname(sys.argv[0])] + for dirname in paths: + fullpath = os.path.join(dirname, filename) + if os.path.isfile(fullpath): + return fullpath + + +# Base machinery to support operation as a plugin to pyang. 
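# For orientation: the sys.argv that LookupModule.run() assembles above is
# roughly equivalent to running pyang by hand (paths are placeholders):
#
#   pyang --plugindir ~/.ansible/tmp/yang2spec -f yang2spec \
#         -p <search_path> openconfig-interfaces.yang
#
# pyang discovers this file in the plugin directory, calls
# pyang_plugin_init() below to register Yang2Spec, and Yang2Spec.emit()
# then writes the JSON spec and config schema to stdout, which run()
# captures through the redirected StringIO.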
+def pyang_plugin_init(): + plugin.register_plugin(Yang2Spec()) + + +def emit_ansible_spec(fd): + output = { + "spec": ansible_spec_option, + "config_schema": emit_json_schema() + } + fd.write(json.dumps(output)) + + +def parse_suboptions(config, suboptions): + suboptions = suboptions.get('suboptions') + type = suboptions.pop('suboptions_type', None) + elements = suboptions.pop('suboptions_elements', None) + + for k, v in iteritems(suboptions): + if type == 'list': + config[k] = [{}] + else: + config[k] = {} + if isinstance(v, dict): + if v.get('suboptions'): + if isinstance(config[k], list): + parse_suboptions(config[k][0], v) + else: + parse_suboptions(config[k], v) + else: + parse_options(config, k, v) + else: + config[k] = v + return config + + +def parse_options(config, name, spec): + type = spec.get('type', 'str') + if type == 'dict': + config[name] = {} + elif type == 'list': + config[name] = [] + else: + config[name] = None + + default = spec.get('default') + if default: + if type in ('int', 'long'): + config[name] = int(default) + elif type in ('float',): + config[name] = float(default) + elif type in ('boolean',): + config[name] = boolean(default) + else: + config[name] = default + + +def emit_json_schema(): + options_spec = deepcopy(ansible_spec_option) + options = options_spec.get('options') + config = {} + for k, v in iteritems(options): + config[k] = {} + if v.get('suboptions'): + parse_suboptions(config[k], v) + else: + parse_options(config, k, v) + return config + + +def safe_name(arg): + """ + Make a leaf or container name safe for use in Python. + """ + arg = arg.replace("-", "_") + arg = arg.replace(".", "_") + if arg in reserved_name: + arg += "_" + # store the unsafe->original version mapping + # so that we can retrieve it when get() is called. + return arg + + +def module_import_prefixes(ctx): + mod_ref_prefixes = {} + for mod in ctx.modules: + m = ctx.search_module(0, mod[0]) + for importstmt in m.search('import'): + if not importstmt.arg in mod_ref_prefixes: + mod_ref_prefixes[importstmt.arg] = [] + mod_ref_prefixes[importstmt.arg].append(importstmt.search_one('prefix').arg) + return mod_ref_prefixes + + +def find_child_definitions(obj, defn, prefix, definitions): + for i in obj.search(defn): + if i.arg in definitions: + sys.stderr.write("WARNING: duplicate definition of %s" % i.arg) + else: + definitions["%s:%s" % (prefix, i.arg)] = i + + possible_parents = [ + 'grouping', 'container', + 'list', 'rpc', 'input', + 'output', 'notification' + ] + + for parent_type in possible_parents: + for ch in obj.search(parent_type): + if ch.i_children: + find_child_definitions(ch, defn, prefix, definitions) + + return definitions + + +def find_definitions(defn, ctx, module, prefix): + # Find the statements within a module that map to a particular type of + # statement, for instance - find typedefs, or identities, and reutrn them + # as a dictionary to the calling function. 
+ definitions = {} + return find_child_definitions(module, defn, prefix, definitions) + + +class Identity(object): + def __init__(self, name): + self.name = name + self.source_module = None + self._imported_prefixes = [] + self.source_namespace = None + self.base = None + self.children = [] + + def add_prefix(self, prefix): + if not prefix in self._imported_prefixes: + self._imported_prefixes.append(prefix) + + def add_child(self, child): + if not isinstance(child, Identity): + raise ValueError("Must supply a identity as a child") + self.children.append(child) + + def __str__(self): + return "%s:%s" % (self.source_module, self.name) + + def prefixes(self): + return self._imported_prefixes + + +class IdentityStore(object): + def __init__(self): + self._store = [] + + def find_identity_by_source_name(self, s, n): + for i in self._store: + if i.source_module == s and i.name == n: + return i + + def add_identity(self, i): + if isinstance(i, Identity): + if not self.find_identity_by_source_name(i.source_module, i.name): + self._store.append(i) + else: + raise ValueError("Must specify an identity") + + def identities(self): + return ["%s:%s" % (i.source_module, i.name) for i in self._store] + + def __iter__(self): + return iter(self._store) + + def build_store_from_definitions(self, ctx, defnd): + unresolved_identities = list(defnd.keys()) + unresolved_identity_count = {k: 0 for k in defnd} + error_ids = [] + + mod_ref_prefixes = module_import_prefixes(ctx) + + while len(unresolved_identities): + this_id = unresolved_identities.pop(0) + iddef = defnd[this_id] + + base = iddef.search_one('base') + try: + mainmod = iddef.main_module() + except AttributeError: + mainmod = None + if mainmod is not None: + defmod = mainmod + + defining_module = defmod.arg + namespace = defmod.search_one('namespace').arg + prefix = defmod.search_one('prefix').arg + + if base is None: + # Add a new identity which can be a base + tid = Identity(iddef.arg) + tid.source_module = defining_module + tid.source_namespace = namespace + tid.add_prefix(prefix) + self.add_identity(tid) + + if defining_module in mod_ref_prefixes: + for i in mod_ref_prefixes[defining_module]: + tid.add_prefix(i) + + else: + # Determine what the name of the base and the prefix for + # the base should be + if ":" in base.arg: + base_pfx, base_name = base.arg.split(":") + else: + base_pfx, base_name = prefix, base.arg + + parent_module = util.prefix_to_module(defmod, base_pfx, + base.pos, ctx.errors) + + # Find whether we have the base in the store + base_id = self.find_identity_by_source_name(parent_module.arg, base_name) + + if base_id is None: + # and if not, then push this identity back onto the stack + unresolved_identities.append(this_id) + unresolved_identity_count[this_id] += 1 + else: + # Check we don't already have this identity defined + if self.find_identity_by_source_name(defining_module, iddef.arg) is None: + # otherwise, create a new identity that reflects this one + tid = Identity(iddef.arg) + tid.source_module = defining_module + tid.source_namespace = namespace + tid.add_prefix(prefix) + base_id.add_child(tid) + self.add_identity(tid) + + if defining_module in mod_ref_prefixes: + for i in mod_ref_prefixes[defining_module]: + tid.add_prefix(i) + + if error_ids: + raise TypeError("could not resolve identities %s" % error_ids) + + self._build_inheritance() + + def _recurse_children(self, identity, children): + for child in identity.children: + children.append(child) + self._recurse_children(child, children) + return children + + def 
_build_inheritance(self): + for i in self._store: + ch = list() + self._recurse_children(i, ch) + i.children = ch + + +# Core function to build the Ansible spec output - starting with building the +# dependencies - and then working through the instantiated tree that pyang has +# already parsed. +def build_spec(ctx, modules, fd): + # Restrict the output of the plugin to only the modules that are supplied + # to pyang. + module_d = {} + for mod in modules: + module_d[mod.arg] = mod + pyang_called_modules = module_d.keys() + + # Bail if there are pyang errors, since this certainly means that the + # output will fail - unless these are solely due to imports that + # we provided but then unused. + if len(ctx.errors): + for e in ctx.errors: + display.display("INFO: encountered %s" % str(e)) + if not e[1] in ["UNUSED_IMPORT", "PATTERN_ERROR"]: + raise AnsibleError("FATAL: yang2spec cannot build module that pyang" + + " has found errors with.\n") + + # Determine all modules, and submodules that are needed, along with the + # prefix that is used for it. We need to ensure that we understand all of the + # prefixes that might be used to reference an identity or a typedef. + all_mods = [] + for module in modules: + local_module_prefix = module.search_one('prefix') + if local_module_prefix is None: + local_module_prefix = \ + module.search_one('belongs-to').search_one('prefix') + if local_module_prefix is None: + raise AttributeError("A module (%s) must have a prefix or parent " + + "module") + local_module_prefix = local_module_prefix.arg + else: + local_module_prefix = local_module_prefix.arg + mods = [(local_module_prefix, module)] + + imported_modules = module.search('import') + + # 'include' statements specify the submodules of the existing module - + # that also need to be parsed. + for i in module.search('include'): + subm = ctx.get_module(i.arg) + if subm is not None: + mods.append((local_module_prefix, subm)) + # Handle the case that imports are within a submodule + if subm.search('import') is not None: + imported_modules.extend(subm.search('import')) + + # 'import' statements specify the other modules that this module will + # reference. + for j in imported_modules: + mod = ctx.get_module(j.arg) + if mod is not None: + imported_module_prefix = j.search_one('prefix').arg + mods.append((imported_module_prefix, mod)) + modules.append(mod) + all_mods.extend(mods) + + # remove duplicates from the list (same module and prefix) + new_all_mods = [] + for mod in all_mods: + if mod not in new_all_mods: + new_all_mods.append(mod) + all_mods = new_all_mods + + # Build a list of the 'typedef' and 'identity' statements that are included + # in the modules supplied. + defn = {} + for defnt in ['typedef', 'identity']: + defn[defnt] = {} + for m in all_mods: + t = find_definitions(defnt, ctx, m[1], m[0]) + for k in t: + if k not in defn[defnt]: + defn[defnt][k] = t[k] + + # Build the identities and typedefs (these are added to the class_map which + # is globally referenced). 
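# As a rough sketch of what the two calls below add to class_map (the
# identity names are hypothetical): an identity "interface-type" with a
# derived identity "ethernetCsmacd" ends up as
#
#   class_map['interface-type'] = {
#       "native_type": "identity",
#       "restriction_type": "dict_key",
#       "restriction_argument": {"ethernetCsmacd": {...}, ...},
#       "parent_type": "string",
#       "base_type": False,
#   }
#
# so leaves of type identityref can later be checked against the set of
# known derived identities; typedefs are resolved into the same map.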
+ build_identities(ctx, defn['identity']) + build_typedefs(ctx, defn['typedef']) + + # Iterate through the tree which pyang has built, solely for the modules + # that pyang was asked to build + for modname in pyang_called_modules: + module = module_d[modname] + mods = [module] + for i in module.search('include'): + subm = ctx.get_module(i.arg) + if subm is not None: + mods.append(subm) + for m in mods: + children = [ch for ch in module.i_children + if ch.keyword in statements.data_definition_keywords] + get_children(ctx, fd, children, m, m) + + +def build_identities(ctx, defnd): + # Build a storage object that has all the definitions that we + # require within it. + idstore = IdentityStore() + idstore.build_store_from_definitions(ctx, defnd) + + identity_dict = {} + for identity in idstore: + for prefix in identity.prefixes(): + ident = "%s:%s" % (prefix, identity.name) + identity_dict[ident] = {} + identity_dict["%s" % identity.name] = {} + for ch in identity.children: + d = {"@module": ch.source_module, "@namespace": ch.source_namespace} + for cpfx in ch.prefixes() + [None]: + if cpfx is not None: + spfx = "%s:" % cpfx + else: + spfx = "" + identity_dict[ident][ch.name] = d + identity_dict[identity.name][ch.name] = d + identity_dict[ident]["%s%s" % (spfx, ch.name)] = d + identity_dict[identity.name]["%s%s" % (spfx, ch.name)] = d + + if not identity.name in identity_dict: + identity_dict[identity.name] = {} + + # Add entries to the class_map such that this identity can be referenced by + # elements that use this identity ref. + for i in identity_dict: + id_type = {"native_type": "identity", + "restriction_argument": identity_dict[i], + "restriction_type": "dict_key", + "parent_type": "string", + "base_type": False} + class_map[i] = id_type + + +def build_typedefs(ctx, defnd): + # Build the type definitions that are specified within a model. Since + # typedefs are essentially derived from existing types, order of processing + # is important - we need to go through and build the types in order where + # they have a known 'type'. + unresolved_tc = {} + for i in defnd: + unresolved_tc[i] = 0 + unresolved_t = list(defnd.keys()) + error_ids = [] + known_types = list(class_map.keys()) + known_types.append('enumeration') + known_types.append('leafref') + base_types = copy.deepcopy(known_types) + process_typedefs_ordered = [] + + while len(unresolved_t): + t = unresolved_t.pop(0) + base_t = defnd[t].search_one('type') + if base_t.arg == "union": + subtypes = [] + for i in base_t.search('type'): + if i.arg == "identityref": + subtypes.append(i.search_one('base')) + else: + subtypes.append(i) + elif base_t.arg == "identityref": + subtypes = [base_t.search_one('base')] + else: + subtypes = [base_t] + + any_unknown = False + for i in subtypes: + # Resolve this typedef to the module that it + # was defined by + + if ":" in i.arg: + defining_module = util.prefix_to_module(defnd[t].i_module, + i.arg.split(":")[0], defnd[t].pos, ctx.errors) + else: + defining_module = defnd[t].i_module + + belongs_to = defining_module.search_one('belongs-to') + if belongs_to is not None: + for mod in ctx.modules: + if mod[0] == belongs_to.arg: + defining_module = ctx.modules[mod] + + real_pfx = defining_module.search_one('prefix').arg + + if ":" in i.arg: + tn = u"%s:%s" % (real_pfx, i.arg.split(":")[1]) + elif i.arg not in base_types: + # If this was not a base type (defined in YANG) then resolve it + # to the module it belongs to. 
+ tn = u"%s:%s" % (real_pfx, i.arg) + else: + tn = i.arg + + if tn not in known_types: + any_unknown = True + + if not any_unknown: + process_typedefs_ordered.append((t, defnd[t])) + known_types.append(t) + else: + unresolved_tc[t] += 1 + if unresolved_tc[t] > 1000: + # Take a similar approach to the resolution of identities. If we have a + # typedef that has a type in it that is not found after many iterations + # then we should bail. + error_ids.append(t) + sys.stderr.write("could not find a match for %s type -> %s\n" % + (t, [i.arg for i in subtypes])) + else: + unresolved_t.append(t) + + if error_ids: + raise TypeError("could not resolve typedefs %s" % error_ids) + + # Process the types that we built above. + for i_tuple in process_typedefs_ordered: + item = i_tuple[1] + type_name = i_tuple[0] + mapped_type = False + restricted_arg = False + # Copy the class_map entry - this is done so that we do not alter the + # existing instance in memory as we add to it. + cls, elemtype = copy.deepcopy(build_elemtype(ctx, item.search_one('type'))) + known_types = list(class_map.keys()) + # Enumeration is a native type, but is not natively supported + # in the class_map, and hence we append it here. + known_types.append("enumeration") + known_types.append("leafref") + + # Don't allow duplicate definitions of types + if type_name in known_types: + raise TypeError("Duplicate definition of %s" % type_name) + default_stmt = item.search_one('default') + + # 'elemtype' is a list when the type includes a union, so we need to go + # through and build a type definition that supports multiple types. + if not isinstance(elemtype, list): + restricted = False + # Map the original type to the new type, parsing the additional arguments + # that may be specified, for example, a new default, a pattern that must + # be matched, or a length (stored in the restriction_argument, and + # restriction_type class_map variables). + class_map[type_name] = {"base_type": False} + class_map[type_name]["native_type"] = elemtype["native_type"] + if "parent_type" in elemtype: + class_map[type_name]["parent_type"] = elemtype["parent_type"] + else: + yang_type = item.search_one('type').arg + if yang_type not in known_types: + raise TypeError("typedef specified a native type that was not " + + "supported") + class_map[type_name]["parent_type"] = yang_type + if default_stmt is not None: + class_map[type_name]["default"] = default_stmt.arg + if "referenced_path" in elemtype: + class_map[type_name]["referenced_path"] = elemtype["referenced_path"] + class_map[type_name]["class_override"] = "leafref" + if "require_instance" in elemtype: + class_map[type_name]["require_instance"] = elemtype["require_instance"] + if "restriction_type" in elemtype: + class_map[type_name]["restriction_type"] = \ + elemtype["restriction_type"] + class_map[type_name]["restriction_argument"] = \ + elemtype["restriction_argument"] + if "quote_arg" in elemtype: + class_map[type_name]["quote_arg"] = elemtype["quote_arg"] + else: + # Handle a typedef that is a union - extended the class_map arguments + # to be a list that is parsed by the relevant dynamic type generation + # function. 
+ native_type = [] + parent_type = [] + default = False if default_stmt is None else default_stmt.arg + for i in elemtype: + + if isinstance(i[1]["native_type"], list): + native_type.extend(i[1]["native_type"]) + else: + native_type.append(i[1]["native_type"]) + + if i[1]["yang_type"] in known_types: + parent_type.append(i[1]["yang_type"]) + elif i[1]["yang_type"] == "identityref": + parent_type.append(i[1]["parent_type"]) + else: + msg = "typedef in a union specified a native type that was not" + msg += " supported (%s in %s)" % (i[1]["yang_type"], item.arg) + raise TypeError(msg) + + if "default" in i[1] and not default: + # When multiple 'default' values are specified within a union that + # is within a typedef, then it will choose the first one. + q = True if "quote_arg" in i[1] else False + default = (i[1]["default"], q) + class_map[type_name] = {"native_type": native_type, "base_type": False, + "parent_type": parent_type} + if default: + class_map[type_name]["default"] = default[0] + class_map[type_name]["quote_default"] = default[1] + + class_map[type_name.split(":")[1]] = class_map[type_name] + + +def get_children(ctx, fd, i_children, module, parent, path=str(), + parent_cfg=True, choice=False, register_paths=True): + # Iterative function that is called for all elements that have childen + # data nodes in the tree. This function resolves those nodes into the + # relevant leaf, or container/list configuration and outputs the python + # code that corresponds to it to the relevant file. parent_cfg is used to + # ensure that where a parent container was set to config false, this is + # inherited by all elements below it; and choice is used to store whether + # these leaves are within a choice or not. + used_types, elements = [], [] + choices = False + + # If we weren't asked to split the files, then just use the file handle + # provided. + nfd = fd + + if parent_cfg: + # The first time we find a container that has config false set on it + # then we need to hand this down the tree - we don't need to look if + # parent_cfg has already been set to False as we need to inherit. + parent_config = parent.search_one('config') + if parent_config is not None: + parent_config = parent_config.arg + if parent_config.upper() == "FALSE": + # this container is config false + parent_cfg = False + + for ch in i_children: + children_tmp = getattr(ch, "i_children", None) + if children_tmp is not None: + children_tmp = [i.arg for i in children_tmp] + if ch.keyword == "choice": + for choice_ch in ch.i_children: + # these are case statements + for case_ch in choice_ch.i_children: + elements += get_element(ctx, fd, case_ch, module, parent, + path + "/" + case_ch.arg, parent_cfg=parent_cfg, + choice=(ch.arg, choice_ch.arg), register_paths=register_paths) + else: + elements += get_element(ctx, fd, ch, module, parent, path + "/" + ch.arg, + parent_cfg=parent_cfg, choice=choice, register_paths=register_paths) + + # 'container', 'module', 'list' and 'submodule' all have their own classes + # generated. 
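# A short illustration of the list handling below (module name taken from
# the EXAMPLES above): for the openconfig-interfaces "interface" list,
# which is keyed on "name", the key leaf is looked up so its spec entry is
# marked "required": True, and the list node itself is emitted with
# "suboptions_type": "list" and "suboptions_elements": "dict", matching
# the spec sample shown in RETURN at the top of this file.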
+ if parent.keyword in ["container", "module", "list", "submodule", "input", + "output", "rpc", "notification"]: + + if path == "" and ansible_spec_header.get('module_name') is None: + ansible_spec_header['module_name'] = safe_name(parent.arg) + + parent_descr = parent.search_one('description') + if parent_descr is not None: + ansible_spec_header['description'] = parent_descr.arg.decode('utf8').encode('ascii', + 'ignore').strip().replace( + '\n', ' ') + ansible_spec_header['short_description'] = ansible_spec_header['description'].split('.')[0] + else: + ansible_spec_header['description'] = "" + ansible_spec_header['short_description'] = "" + + ansible_spec_header['ansible_metadata'] = dict(metadata_version=1.1, + status=['preview'], + supported_by='network') + + # If the container is actually a list, then determine what the key value + # is and store this such that we can give a hint. + keyval = False + if parent.keyword == "list": + keyval = parent.search_one('key').arg if parent.search_one('key') \ + is not None else False + if keyval and " " in keyval: + keyval = keyval.split(" ") + else: + keyval = [keyval] + + else: + raise TypeError("unhandled keyword with children %s at %s" % + (parent.keyword, parent.pos)) + + if len(elements) == 0: + pass + else: + for i in elements: + if i['yang_name'] == 'peer-type': + pass + if i["config"] and parent_cfg: + spec = ansible_spec_option["options"] + else: + spec = ansible_spec_return["return"] + + default_arg = None + if "default" in i and not i["default"] is None: + default_arg = "\"%s\"" % (i["default"]) if i["quote_arg"] else "%s" \ + % i["default"] + if i["class"] in ("leaf", "leaf-list"): + spec = get_node_dict(i, spec) + + if default_arg is not None: + spec['default'] = default_arg + + if i.get('type'): + spec['type'] = i['type'] + + if i.get('restriction_argument'): + spec['restriction'] = i['restriction_argument'] + spec['restriction_type'] = i.get('restriction_type') + elif i.get('restriction_dict'): + spec['restriction'] = i['restriction_dict'] + + if i.get('default'): + spec['default'] = i['default'] + + if i.get('description'): + spec['description'] = i["description"].decode('utf-8').encode('ascii', 'ignore').replace('\n', ' ') + + if keyval and i['yang_name'] in keyval: + spec['required'] = True + + if i.get("class") == "leaf-list": + spec['elements'] = spec['type'] + spec['type'] = 'list' + + elif i["class"] == "container": + spec = get_node_dict(i, spec) + if i.get('presence'): + spec['presence'] = True + + elif i["class"] == "list": + spec = get_node_dict(i, spec) + spec['suboptions_type'] = 'list' + spec['suboptions_elements'] = 'dict' + + return None + + +def build_elemtype(ctx, et, prefix=False): + # Build a dictionary which defines the type for the element. This is used + # both in the case that a typedef needs to be built, as well as on per-list + # basis. + cls = None + pattern_stmt = et.search_one('pattern') if not et.search_one('pattern') \ + is None else False + range_stmt = et.search_one('range') if not et.search_one('range') \ + is None else False + length_stmt = et.search_one('length') if not et.search_one('length') \ + is None else False + + # Determine whether there are any restrictions that are placed on this leaf, + # and build a dictionary of the different restrictions to be placed on the + # type. 
+ restrictions = {} + if pattern_stmt: + restrictions['pattern'] = pattern_stmt.arg + + if length_stmt: + if "|" in length_stmt.arg: + restrictions['length'] = [i.replace(' ', '') for i in + length_stmt.arg.split("|")] + else: + restrictions['length'] = [length_stmt.arg] + + if range_stmt: + # Complex ranges are separated by pipes + if "|" in range_stmt.arg: + restrictions['range'] = [i.replace(' ', '') for i in + range_stmt.arg.split("|")] + else: + restrictions['range'] = [range_stmt.arg] + + # Build RestrictedClassTypes based on the compiled dictionary and the + # underlying base type. + if len(restrictions): + if 'length' in restrictions or 'pattern' in restrictions: + cls = "restricted-%s" % (et.arg) + elemtype = { + "native_type": class_map[et.arg]["native_type"], + "restriction_dict": restrictions, + "parent_type": et.arg, + "base_type": False, + } + elif 'range' in restrictions: + cls = "restricted-%s" % et.arg + elemtype = { + "native_type": class_map[et.arg]["native_type"], + "restriction_dict": restrictions, + "parent_type": et.arg, + "base_type": False, + } + + # Handle all other types of leaves that are not restricted classes. + if cls is None: + cls = "leaf" + if et.arg == "enumeration": + enumeration_dict = {} + for enum in et.search('enum'): + enumeration_dict[unicode(enum.arg)] = {} + val = enum.search_one('value') + if val is not None: + enumeration_dict[unicode(enum.arg)]["value"] = int(val.arg) + elemtype = {"native_type": "enumeration", + "restriction_argument": enumeration_dict, + "restriction_type": "dict_key", + "parent_type": "string", + "base_type": False} + + # Map decimal64 to a RestrictedPrecisionDecimalType - this is there to + # ensure that the fraction-digits argument can be implemented. Note that + # fraction-digits is a mandatory argument. + elif et.arg == "decimal64": + fd_stmt = et.search_one('fraction-digits') + if fd_stmt is not None: + cls = "restricted-decimal64" + elemtype = {"native_type": fd_stmt.arg, + "base_type": False, + "parent_type": "decimal64"} + else: + elemtype = class_map[et.arg] + # Handle unions - build a list of the supported types that are under the + # union. + elif et.arg == "union": + elemtype = [] + for uniontype in et.search('type'): + elemtype_s = copy.deepcopy(build_elemtype(ctx, uniontype)) + elemtype_s[1]["yang_type"] = uniontype.arg + elemtype.append(elemtype_s) + cls = "union" + # Map leafrefs to a ReferenceType, handling the referenced path, and + # whether require-instance is set. When xpathhelper is not specified, then + # no such mapping is done - at this point, we solely map to a string. + elif et.arg == "leafref": + path_stmt = et.search_one('path') + if path_stmt is None: + raise ValueError("leafref specified with no path statement") + require_instance = \ + class_bool_map[et.search_one('require-instance').arg] \ + if et.search_one('require-instance') \ + is not None else True + + elemtype = { + "native_type": "unicode", + "parent_type": "string", + "base_type": False, + } + # Handle identityrefs, but check whether there is a valid base where this + # has been specified. 
+ elif et.arg == "identityref": + base_stmt = et.search_one('base') + if base_stmt is None: + raise ValueError("identityref specified with no base statement") + try: + elemtype = class_map[base_stmt.arg] + except KeyError: + display.debug(class_map.keys()) + display.debug(et.arg) + display.debug(base_stmt.arg) + raise AnsibleError("FATAL: identityref with an unknown base\n") + else: + # For all other cases, then we should be able to look up directly in the + # class_map for the defined type, since these are not 'derived' types + # at this point. In the case that we are referencing a type that is a + # typedef, then this has been added to the class_map. + try: + elemtype = class_map[et.arg] + except KeyError: + passed = False + if prefix: + try: + tmp_name = "%s:%s" % (prefix, et.arg) + elemtype = class_map[tmp_name] + passed = True + except: + pass + if passed is False: + display.debug(class_map.keys()) + display.debug(et.arg) + display.debug(prefix) + raise AnsibleError("FATAL: unmapped type (%s)\n" % (et.arg)) + + if isinstance(elemtype, list): + cls = "leaf-union" + elif "class_override" in elemtype: + # this is used to propagate the fact that in some cases the + # native type needs to be dynamically built (e.g., leafref) + cls = elemtype["class_override"] + + return cls, elemtype + + +def find_absolute_default_type(default_type, default_value, elemname): + if not isinstance(default_type, list): + return default_type + + for i in default_type: + if not i[1]["base_type"]: + test_type = class_map[i[1]["parent_type"]] + else: + test_type = i[1] + try: + default_type = test_type + break + except (ValueError, TypeError): + pass + return find_absolute_default_type(default_type, default_value, elemname) + + +def get_node_dict(element, spec): + xpath = element["path"] + if xpath.startswith('/'): + xpath = xpath[1:] + + xpath = xpath.split('/') + xpath_len = len(xpath) + for index, item in enumerate(xpath): + if item not in spec.keys(): + if index == (xpath_len - 1): + spec[item] = dict() + spec = spec[item] + else: + spec[item] = dict() + spec[item]['suboptions'] = dict() + spec = spec[item]['suboptions'] + else: + spec = spec[item]['suboptions'] if 'suboptions' in spec[item] else spec[item] + return spec + + +def get_element(ctx, fd, element, module, parent, path, + parent_cfg=True, choice=False, register_paths=True): + # Handle mapping of an invidual element within the model. This function + # produces a dictionary that can then be mapped into the relevant code that + # dynamically generates a class. + + # Find element's namespace and defining module + # If the element has the "main_module" attribute then it is part of a + # submodule and hence we should check the namespace and defining module + # of this, rather than the submodule + if hasattr(element, "main_module"): + element_module = element.main_module() + elif hasattr(element, "i_orig_module"): + element_module = element.i_orig_module + else: + element_module = None + + namespace = element_module.search_one("namespace").arg if \ + element_module.search_one("namespace") is not None else \ + None + defining_module = element_module.arg + + this_object = [] + default = False + has_children = False + create_list = False + + elemdescr = element.search_one('description') + if elemdescr is None: + elemdescr = False + else: + elemdescr = elemdescr.arg + + # If the element has an i_children attribute then this is a container, list + # leaf-list or choice. 
Alternatively, it can be the 'input' or 'output' + # substmts of an RPC or a notification + if hasattr(element, 'i_children'): + if element.keyword in ["container", "list", "input", "output", "notification"]: + has_children = True + elif element.keyword in ["leaf-list"]: + create_list = True + + # Fixup the path when within a choice, because this iteration belives that + # we are under a new container, but this does not exist in the path. + if element.keyword in ["choice"]: + path_parts = path.split("/") + npath = "" + for i in range(0, len(path_parts) - 1): + npath += "%s/" % path_parts[i] + npath.rstrip("/") + else: + npath = path + + # Create an element for a container. + if element.i_children: + chs = element.i_children + has_presence = True if element.search_one('presence') is not None else False + if has_presence is False and len(chs) == 0: + return [] + + get_children(ctx, fd, chs, module, element, npath, parent_cfg=parent_cfg, + choice=choice, register_paths=register_paths) + + elemdict = { + "name": safe_name(element.arg), "origtype": element.keyword, + "class": element.keyword, + "path": safe_name(npath), "config": True, + "description": elemdescr, + "yang_name": element.arg, + "choice": choice, + "register_paths": register_paths, + "namespace": namespace, + "defining_module": defining_module, + "presence": has_presence, + } + + # Otherwise, give a unique name for the class within the dictionary. + elemdict["type"] = "%s_%s_%s" % (safe_name(element.arg), + safe_name(module.arg), + safe_name(path.replace("/", "_"))) + + # Deal with specific cases for list - such as the key and how it is + # ordered. + if element.keyword == "list": + elemdict["key"] = safe_name(element.search_one("key").arg) \ + if element.search_one("key") is not None else False + elemdict["yang_keys"] = element.search_one("key").arg \ + if element.search_one("key") is not None else False + user_ordered = element.search_one('ordered-by') + elemdict["user_ordered"] = True if user_ordered is not None \ + and user_ordered.arg.upper() == "USER" else False + this_object.append(elemdict) + has_children = True + + # Deal with the cases that the attribute does not have children. + if not has_children: + if element.keyword in ["leaf-list"]: + create_list = True + cls, elemtype = copy.deepcopy(build_elemtype(ctx, + element.search_one('type'))) + + # Determine what the default for the leaf should be where there are + # multiple available. + # Algorithm: + # - build a tree that is rooted on this class. 
+ # - perform a breadth-first search - the first node found + # - that has the "default" leaf set, then we take this + # as the value for the default + + # then starting at the selected default node, traverse + # until we find a node that is declared to be a base_type + elemdefault = element.search_one('default') + default_type = False + quote_arg = False + if elemdefault is not None: + elemdefault = elemdefault.arg + default_type = elemtype + if isinstance(elemtype, list): + # this is a union, we should check whether any of the types + # immediately has a default + for i in elemtype: + if "default" in i[1]: + elemdefault = i[1]["default"] + default_type = i[1] + elif "default" in elemtype: + # if the actual type defines the default, then we need to maintain + # this + elemdefault = elemtype["default"] + default_type = elemtype + + # we need to indicate that the default type for the class_map + # is str + tmp_class_map = copy.deepcopy(class_map) + tmp_class_map["enumeration"] = {"parent_type": "string"} + + if not default_type: + if isinstance(elemtype, list): + # this type has multiple parents + for i in elemtype: + if "parent_type" in i[1]: + if isinstance(i[1]["parent_type"], list): + to_visit = [j for j in i[1]["parent_type"]] + else: + to_visit = [i[1]["parent_type"]] + elif "parent_type" in elemtype: + if isinstance(elemtype["parent_type"], list): + to_visit = [i for i in elemtype["parent_type"]] + else: + to_visit = [elemtype["parent_type"]] + + checked = list() + while to_visit: + check = to_visit.pop(0) + if check not in checked: + checked.append(check) + if "parent_type" in tmp_class_map[check]: + if isinstance(tmp_class_map[check]["parent_type"], list): + to_visit.extend(tmp_class_map[check]["parent_type"]) + else: + to_visit.append(tmp_class_map[check]["parent_type"]) + + # checked now has the breadth-first search result + if elemdefault is None: + for option in checked: + if "default" in tmp_class_map[option]: + elemdefault = tmp_class_map[option]["default"] + default_type = tmp_class_map[option] + break + + if elemdefault is not None: + # we now need to check whether there's a need to + # find out what the base type is for this type + # we really expect a linear chain here. + + # if we have a tuple as the type here, this means that + # the default was set at a level where there was not + # a single option for the type. check the default + # against each option, to get a to a single default_type + if isinstance(default_type, list): + default_type = find_absolute_default_type(default_type, elemdefault, + element.arg) + + if not default_type["base_type"]: + if "parent_type" in default_type: + if isinstance(default_type["parent_type"], list): + to_visit = [i for i in default_type["parent_type"]] + else: + to_visit = [default_type["parent_type"]] + checked = list() + while to_visit: + check = to_visit.pop(0) # remove from the top of stack - depth first + if check not in checked: + checked.append(check) + if "parent_type" in tmp_class_map[check]: + if isinstance(tmp_class_map[check]["parent_type"], list): + to_visit.extend(tmp_class_map[check]["parent_type"]) + else: + to_visit.append(tmp_class_map[check]["parent_type"]) + default_type = tmp_class_map[checked.pop()] + if not default_type["base_type"]: + raise TypeError("default type was not a base type") + + # Set the default type based on what was determined above about the + # correct value to set. 
+ if default_type: + quote_arg = default_type["quote_arg"] if "quote_arg" in \ + default_type else False + default_type = default_type["native_type"] + + elemconfig = class_bool_map[element.search_one('config').arg] if \ + element.search_one('config') else True + + elemname = safe_name(element.arg) + + # Deal with the cases that there is a requirement to create a list - these + # are leaf lists. There is some special handling for leaf-lists to ensure + # that the references are correctly created. + if create_list: + if not cls == "leafref": + cls = "leaf-list" + + if isinstance(elemtype, list): + c = 0 + allowed_types = [] + for subtype in elemtype: + # nested union within a leaf-list type + if isinstance(subtype, tuple): + if subtype[0] == "leaf-union": + for subelemtype in subtype[1]["native_type"]: + allowed_types.append(subelemtype) + else: + if isinstance(subtype[1]["native_type"], list): + allowed_types.extend(subtype[1]["native_type"]) + else: + allowed_types.append(subtype[1]["native_type"]) + else: + allowed_types.append(subtype["native_type"]) + else: + allowed_types = elemtype["native_type"] + else: + cls = "leafref-list" + allowed_types = { + "native_type": elemtype["native_type"], + "referenced_path": elemtype["referenced_path"], + "require_instance": elemtype["require_instance"], + } + elemntype = {"class": cls, "native_type": ("TypedListType", + allowed_types)} + + else: + if cls == "union" or cls == "leaf-union": + elemtype = {"class": cls, "native_type": ("UnionType", elemtype)} + elemntype = elemtype["native_type"] + + # Build the dictionary for the element with the relevant meta-data + # specified within it. + elemdict = { + "name": elemname, "type": elemntype, + "origtype": element.search_one('type').arg, "path": + safe_name(path), + "class": cls, "default": elemdefault, + "config": elemconfig, "defaulttype": default_type, + "quote_arg": quote_arg, + "description": elemdescr, "yang_name": element.arg, + "choice": choice, + "register_paths": register_paths, + "namespace": namespace, + "defining_module": defining_module, + "restriction_dict": elemtype.get('restriction_dict'), + "restriction_type": elemtype.get('restriction_type'), + "restriction_argument": elemtype.get('restriction_argument'), + } + + if cls == "leafref": + elemdict["referenced_path"] = elemtype["referenced_path"] + elemdict["require_instance"] = elemtype["require_instance"] + + this_object.append(elemdict) + return this_object From 64eb0d5086072a26decd85ab44b328baf7229470 Mon Sep 17 00:00:00 2001 From: "Ganesh B. Nalawade" Date: Thu, 24 May 2018 13:46:18 +0530 Subject: [PATCH 2/8] Update documentation --- lookup_plugins/yang2spec.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lookup_plugins/yang2spec.py b/lookup_plugins/yang2spec.py index 91122fe..2abd1a0 100644 --- a/lookup_plugins/yang2spec.py +++ b/lookup_plugins/yang2spec.py @@ -26,10 +26,10 @@ short_description: This plugin reads the content of given yang document and transforms it to a rules spec and configuration schema. description: - - This plugin parses yang document and transforms it into a spec which provides a set of rules - This rules spec can be used to validate the input configuration to check if it adheres - with respective yang model. It also outputs the configuration schema in json format and can - be used as reference to build input json configuration. 
+ - This plugin parses yang document and transforms it into a spec which provides a set of rules + This rules spec can be used to validate the input configuration to check if it adheres + with respective yang model. It also outputs the configuration schema in json format and can + be used as reference to build input json configuration. options: _terms: description: The path points to the location of the top level yang module which @@ -38,7 +38,7 @@ search_path: description: - The path is a colon (:) separated list of directories to search for imported yang modules - in the yang file mentioned in C(path) option. If the value is not given it will search in + in the yang file mentioned in C(path) option. If the value is not given it will search in the current directory. required: false """ @@ -46,7 +46,7 @@ EXAMPLES = """ - name: Get interface yang spec set_fact: - interfaces_spec: "{{ lookup('yang2spec', 'openconfig/public/release/models/interfaces/openconfig-interfaces.yang', + interfaces_spec: "{{ lookup('yang2spec', 'openconfig/public/release/models/interfaces/openconfig-interfaces.yang', search_path='openconfig/public/release/models:pyang/modules/') }}" """ @@ -104,7 +104,7 @@ config_schema: description: The json configuration schema generated from yang document returned: success - type: dict + type: dict sample: | { "interfaces": { From 204efa7a09fb3067f04f546da5de82d383f5ad34 Mon Sep 17 00:00:00 2001 From: "Ganesh B. Nalawade" Date: Thu, 24 May 2018 15:12:32 +0530 Subject: [PATCH 3/8] Fix CI issues --- lookup_plugins/yang2spec.py | 46 +++++++++++++------------------------ 1 file changed, 16 insertions(+), 30 deletions(-) diff --git a/lookup_plugins/yang2spec.py b/lookup_plugins/yang2spec.py index 2abd1a0..f333f8a 100644 --- a/lookup_plugins/yang2spec.py +++ b/lookup_plugins/yang2spec.py @@ -127,7 +127,6 @@ import os import sys import copy -import six import shutil import json import imp @@ -136,7 +135,7 @@ from ansible import constants as C from ansible.plugins.lookup import LookupBase -from ansible.module_utils.six import StringIO, iteritems, string_types +from ansible.module_utils.six import StringIO, iteritems, string_types, PY3 from ansible.module_utils.parsing.convert_bool import boolean from ansible.utils.path import unfrackpath, makedirs_safe from ansible.errors import AnsibleError @@ -172,7 +171,7 @@ def warning(msg): # Python3 support -if six.PY3: +if PY3: long = int unicode = str @@ -288,8 +287,7 @@ def warning(msg): "int8", "int16", "int32", "int64"] # The types that are built-in to YANG -YANG_BUILTIN_TYPES = list(class_map.keys()) + \ - ["container", "list", "rpc", "notification", "leafref"] +YANG_BUILTIN_TYPES = list(class_map.keys()) + ["container", "list", "rpc", "notification", "leafref"] YANG2SPEC_PLUGIN_PATH = "~/.ansible/tmp/yang2spec" @@ -476,7 +474,7 @@ def module_import_prefixes(ctx): for mod in ctx.modules: m = ctx.search_module(0, mod[0]) for importstmt in m.search('import'): - if not importstmt.arg in mod_ref_prefixes: + if importstmt.arg not in mod_ref_prefixes: mod_ref_prefixes[importstmt.arg] = [] mod_ref_prefixes[importstmt.arg].append(importstmt.search_one('prefix').arg) return mod_ref_prefixes @@ -521,7 +519,7 @@ def __init__(self, name): self.children = [] def add_prefix(self, prefix): - if not prefix in self._imported_prefixes: + if prefix not in self._imported_prefixes: self._imported_prefixes.append(prefix) def add_child(self, child): @@ -662,8 +660,7 @@ def build_spec(ctx, modules, fd): for e in ctx.errors: display.display("INFO: encountered %s" % 
str(e)) if not e[1] in ["UNUSED_IMPORT", "PATTERN_ERROR"]: - raise AnsibleError("FATAL: yang2spec cannot build module that pyang" + - " has found errors with.\n") + raise AnsibleError("FATAL: yang2spec cannot build module that pyang has found errors with.\n") # Determine all modules, and submodules that are needed, along with the # prefix that is used for it. We need to ensure that we understand all of the @@ -766,7 +763,7 @@ def build_identities(ctx, defnd): identity_dict[ident]["%s%s" % (spfx, ch.name)] = d identity_dict[identity.name]["%s%s" % (spfx, ch.name)] = d - if not identity.name in identity_dict: + if identity.name not in identity_dict: identity_dict[identity.name] = {} # Add entries to the class_map such that this identity can be referenced by @@ -1017,8 +1014,7 @@ def get_children(ctx, fd, i_children, module, parent, path=str(), # is and store this such that we can give a hint. keyval = False if parent.keyword == "list": - keyval = parent.search_one('key').arg if parent.search_one('key') \ - is not None else False + keyval = parent.search_one('key').arg if parent.search_one('key') is not None else False if keyval and " " in keyval: keyval = keyval.split(" ") else: @@ -1089,12 +1085,9 @@ def build_elemtype(ctx, et, prefix=False): # both in the case that a typedef needs to be built, as well as on per-list # basis. cls = None - pattern_stmt = et.search_one('pattern') if not et.search_one('pattern') \ - is None else False - range_stmt = et.search_one('range') if not et.search_one('range') \ - is None else False - length_stmt = et.search_one('length') if not et.search_one('length') \ - is None else False + pattern_stmt = et.search_one('pattern') if not et.search_one('pattern') is None else False + range_stmt = et.search_one('range') if not et.search_one('range') is None else False + length_stmt = et.search_one('length') if not et.search_one('length') is None else False # Determine whether there are any restrictions that are placed on this leaf, # and build a dictionary of the different restrictions to be placed on the @@ -1182,10 +1175,7 @@ def build_elemtype(ctx, et, prefix=False): path_stmt = et.search_one('path') if path_stmt is None: raise ValueError("leafref specified with no path statement") - require_instance = \ - class_bool_map[et.search_one('require-instance').arg] \ - if et.search_one('require-instance') \ - is not None else True + require_instance = class_bool_map[et.search_one('require-instance').arg] if et.search_one('require-instance') is not None else True elemtype = { "native_type": "unicode", @@ -1359,13 +1349,10 @@ def get_element(ctx, fd, element, module, parent, path, # Deal with specific cases for list - such as the key and how it is # ordered. 
if element.keyword == "list": - elemdict["key"] = safe_name(element.search_one("key").arg) \ - if element.search_one("key") is not None else False - elemdict["yang_keys"] = element.search_one("key").arg \ - if element.search_one("key") is not None else False + elemdict["key"] = safe_name(element.search_one("key").arg) if element.search_one("key") is not None else False + elemdict["yang_keys"] = element.search_one("key").arg if element.search_one("key") is not None else False user_ordered = element.search_one('ordered-by') - elemdict["user_ordered"] = True if user_ordered is not None \ - and user_ordered.arg.upper() == "USER" else False + elemdict["user_ordered"] = True if user_ordered is not None and user_ordered.arg.upper() == "USER" else False this_object.append(elemdict) has_children = True @@ -1373,8 +1360,7 @@ def get_element(ctx, fd, element, module, parent, path, if not has_children: if element.keyword in ["leaf-list"]: create_list = True - cls, elemtype = copy.deepcopy(build_elemtype(ctx, - element.search_one('type'))) + cls, elemtype = copy.deepcopy(build_elemtype(ctx, element.search_one('type'))) # Determine what the default for the leaf should be where there are # multiple available. From fe65430bb88c928016eaf4ea0d083abbacdda609 Mon Sep 17 00:00:00 2001 From: "Ganesh B. Nalawade" Date: Thu, 31 May 2018 16:34:00 +0530 Subject: [PATCH 4/8] Fix review comments and add integration test --- lookup_plugins/yang2spec.py | 23 +- tests/yang2spec/test.retry | 1 + tests/yang2spec/test.yml | 4 + tests/yang2spec/yang2spec/defaults/main.yaml | 2 + .../yang2spec/files/ietf/ietf-interfaces.yang | 691 ++++++++++++ .../yang2spec/files/ietf/ietf-yang-types.yang | 474 +++++++++ .../interfaces/openconfig-interfaces.yang | 992 ++++++++++++++++++ .../openconfig/openconfig-extensions.yang | 91 ++ .../openconfig/types/openconfig-types.yang | 401 +++++++ .../types/openconfig-yang-types.yang | 156 +++ tests/yang2spec/yang2spec/meta/main.yaml | 3 + tests/yang2spec/yang2spec/tasks/basic.yaml | 48 + tests/yang2spec/yang2spec/tasks/main.yaml | 3 + 13 files changed, 2872 insertions(+), 17 deletions(-) create mode 100644 tests/yang2spec/test.retry create mode 100644 tests/yang2spec/test.yml create mode 100644 tests/yang2spec/yang2spec/defaults/main.yaml create mode 100644 tests/yang2spec/yang2spec/files/ietf/ietf-interfaces.yang create mode 100644 tests/yang2spec/yang2spec/files/ietf/ietf-yang-types.yang create mode 100644 tests/yang2spec/yang2spec/files/openconfig/interfaces/openconfig-interfaces.yang create mode 100644 tests/yang2spec/yang2spec/files/openconfig/openconfig-extensions.yang create mode 100644 tests/yang2spec/yang2spec/files/openconfig/types/openconfig-types.yang create mode 100644 tests/yang2spec/yang2spec/files/openconfig/types/openconfig-yang-types.yang create mode 100644 tests/yang2spec/yang2spec/meta/main.yaml create mode 100644 tests/yang2spec/yang2spec/tasks/basic.yaml create mode 100644 tests/yang2spec/yang2spec/tasks/main.yaml diff --git a/lookup_plugins/yang2spec.py b/lookup_plugins/yang2spec.py index f333f8a..2166902 100644 --- a/lookup_plugins/yang2spec.py +++ b/lookup_plugins/yang2spec.py @@ -1,23 +1,10 @@ -# -# Copyright 2018 Red Hat | Ansible -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# (c) 2018 Red Hat, Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . from __future__ import (absolute_import, division, print_function) - __metaclass__ = type DOCUMENTATION = """ @@ -37,7 +24,7 @@ required: True search_path: description: - - The path is a colon (:) separated list of directories to search for imported yang modules + - is a colon C(:) separated list of directories to search for imported yang modules in the yang file mentioned in C(path) option. If the value is not given it will search in the current directory. required: false @@ -558,7 +545,9 @@ def __iter__(self): def build_store_from_definitions(self, ctx, defnd): unresolved_identities = list(defnd.keys()) - unresolved_identity_count = {k: 0 for k in defnd} + unresolved_identity_count = {} + for k in defnd: + unresolved_identity_count[k] = 0 error_ids = [] mod_ref_prefixes = module_import_prefixes(ctx) diff --git a/tests/yang2spec/test.retry b/tests/yang2spec/test.retry new file mode 100644 index 0000000..2fbb50c --- /dev/null +++ b/tests/yang2spec/test.retry @@ -0,0 +1 @@ +localhost diff --git a/tests/yang2spec/test.yml b/tests/yang2spec/test.yml new file mode 100644 index 0000000..4f3a769 --- /dev/null +++ b/tests/yang2spec/test.yml @@ -0,0 +1,4 @@ +- hosts: localhost + connection: local + roles: + - yang2spec diff --git a/tests/yang2spec/yang2spec/defaults/main.yaml b/tests/yang2spec/yang2spec/defaults/main.yaml new file mode 100644 index 0000000..25ef214 --- /dev/null +++ b/tests/yang2spec/yang2spec/defaults/main.yaml @@ -0,0 +1,2 @@ +yang_file: "{{ role_path }}/files/openconfig/interfaces/openconfig-interfaces.yang" +search_path: "{{ role_path }}/files/" diff --git a/tests/yang2spec/yang2spec/files/ietf/ietf-interfaces.yang b/tests/yang2spec/yang2spec/files/ietf/ietf-interfaces.yang new file mode 100644 index 0000000..62242c7 --- /dev/null +++ b/tests/yang2spec/yang2spec/files/ietf/ietf-interfaces.yang @@ -0,0 +1,691 @@ +module ietf-interfaces { + namespace "urn:ietf:params:xml:ns:yang:ietf-interfaces"; + prefix if; + + import ietf-yang-types { + prefix yang; + } + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: + WG List: + + WG Chair: Thomas Nadeau + + + WG Chair: Juergen Schoenwaelder + + + Editor: Martin Bjorklund + "; + + description + "This module contains a collection of YANG definitions for + managing network interfaces. + + Copyright (c) 2014 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). 
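
One note on the build_store_from_definitions hunk above: the explicit loop builds exactly the same mapping as the removed dict comprehension (every identity name mapped to a zero count). Dict comprehensions need Python 2.7 or newer, so the rewrite presumably keeps the plugin importable on older interpreters; that motivation is an assumption, since the commit message does not state a reason.

    # Equivalent forms; the comprehension requires Python 2.7+.
    unresolved_identity_count = {}
    for k in defnd:
        unresolved_identity_count[k] = 0
    # same result as: unresolved_identity_count = {k: 0 for k in defnd}
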
+ + This version of this YANG module is part of RFC 7223; see + the RFC itself for full legal notices."; + + revision 2014-05-08 { + description + "Initial revision."; + reference + "RFC 7223: A YANG Data Model for Interface Management"; + } + + /* + * Typedefs + */ + + typedef interface-ref { + type leafref { + path "/if:interfaces/if:interface/if:name"; + } + description + "This type is used by data models that need to reference + configured interfaces."; + } + + typedef interface-state-ref { + type leafref { + path "/if:interfaces-state/if:interface/if:name"; + } + description + "This type is used by data models that need to reference + the operationally present interfaces."; + } + + /* + * Identities + */ + + identity interface-type { + description + "Base identity from which specific interface types are + derived."; + } + + /* + * Features + */ + + feature arbitrary-names { + description + "This feature indicates that the device allows user-controlled + interfaces to be named arbitrarily."; + } + feature pre-provisioning { + description + "This feature indicates that the device supports + pre-provisioning of interface configuration, i.e., it is + possible to configure an interface whose physical interface + hardware is not present on the device."; + } + + feature if-mib { + description + "This feature indicates that the device implements + the IF-MIB."; + reference + "RFC 2863: The Interfaces Group MIB"; + } + + /* + * Configuration data nodes + */ + + container interfaces { + description + "Interface configuration parameters."; + + list interface { + key "name"; + + description + "The list of configured interfaces on the device. + + The operational state of an interface is available in the + /interfaces-state/interface list. If the configuration of a + system-controlled interface cannot be used by the system + (e.g., the interface hardware present does not match the + interface type), then the configuration is not applied to + the system-controlled interface shown in the + /interfaces-state/interface list. If the configuration + of a user-controlled interface cannot be used by the system, + the configured interface is not instantiated in the + /interfaces-state/interface list."; + + leaf name { + type string; + description + "The name of the interface. + + A device MAY restrict the allowed values for this leaf, + possibly depending on the type of the interface. + For system-controlled interfaces, this leaf is the + device-specific name of the interface. The 'config false' + list /interfaces-state/interface contains the currently + existing interfaces on the device. + + If a client tries to create configuration for a + system-controlled interface that is not present in the + /interfaces-state/interface list, the server MAY reject + the request if the implementation does not support + pre-provisioning of interfaces or if the name refers to + an interface that can never exist in the system. A + NETCONF server MUST reply with an rpc-error with the + error-tag 'invalid-value' in this case. + + If the device supports pre-provisioning of interface + configuration, the 'pre-provisioning' feature is + advertised. + + If the device allows arbitrarily named user-controlled + interfaces, the 'arbitrary-names' feature is advertised. + + When a configured user-controlled interface is created by + the system, it is instantiated with the same name in the + /interface-state/interface list."; + } + + leaf description { + type string; + description + "A textual description of the interface. 
+ + A server implementation MAY map this leaf to the ifAlias + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifAlias. The definition of + such a mechanism is outside the scope of this document. + + Since ifAlias is defined to be stored in non-volatile + storage, the MIB implementation MUST map ifAlias to the + value of 'description' in the persistently stored + datastore. + + Specifically, if the device supports ':startup', when + ifAlias is read the device MUST return the value of + 'description' in the 'startup' datastore, and when it is + written, it MUST be written to the 'running' and 'startup' + datastores. Note that it is up to the implementation to + + decide whether to modify this single leaf in 'startup' or + perform an implicit copy-config from 'running' to + 'startup'. + + If the device does not support ':startup', ifAlias MUST + be mapped to the 'description' leaf in the 'running' + datastore."; + reference + "RFC 2863: The Interfaces Group MIB - ifAlias"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + description + "The type of the interface. + + When an interface entry is created, a server MAY + initialize the type leaf with a valid value, e.g., if it + is possible to derive the type from the name of the + interface. + + If a client tries to set the type of an interface to a + value that can never be used by the system, e.g., if the + type is not supported or if the type does not match the + name of the interface, the server MUST reject the request. + A NETCONF server MUST reply with an rpc-error with the + error-tag 'invalid-value' in this case."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf enabled { + type boolean; + default "true"; + description + "This leaf contains the configured, desired state of the + interface. + + Systems that implement the IF-MIB use the value of this + leaf in the 'running' datastore to set + IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry + has been initialized, as described in RFC 2863. + + Changes in this leaf in the 'running' datastore are + reflected in ifAdminStatus, but if ifAdminStatus is + changed over SNMP, this leaf is not affected."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf link-up-down-trap-enable { + if-feature if-mib; + type enumeration { + enum enabled { + value 1; + } + enum disabled { + value 2; + } + } + description + "Controls whether linkUp/linkDown SNMP notifications + should be generated for this interface. + + If this node is not configured, the value 'enabled' is + operationally used by the server for interfaces that do + not operate on top of any other interface (i.e., there are + no 'lower-layer-if' entries), and 'disabled' otherwise."; + reference + "RFC 2863: The Interfaces Group MIB - + ifLinkUpDownTrapEnable"; + } + } + } + + /* + * Operational state data nodes + */ + + container interfaces-state { + config false; + description + "Data nodes for the operational state of interfaces."; + + list interface { + key "name"; + + description + "The list of interfaces on the device. + + System-controlled interfaces created by the system are + always present in this list, whether they are configured or + not."; + + leaf name { + type string; + description + "The name of the interface. + + A server implementation MAY map this leaf to the ifName + MIB object. 
Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + description + "The type of the interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + mandatory true; + description + "The desired state of the interface. + + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + mandatory true; + description + "The current operational state of the interface. + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + mandatory true; + description + "The ifIndex value for the ifEntry represented by this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. 
For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-state-ref; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-state-ref; + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + description + "The total number of octets received on the interface, + including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted, and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted, and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted, and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. One possible reason + for discarding such a packet could be to free up buffer + space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + } + } +} diff --git a/tests/yang2spec/yang2spec/files/ietf/ietf-yang-types.yang b/tests/yang2spec/yang2spec/files/ietf/ietf-yang-types.yang new file mode 100644 index 0000000..ee58fa3 --- /dev/null +++ b/tests/yang2spec/yang2spec/files/ietf/ietf-yang-types.yang @@ -0,0 +1,474 @@ +module ietf-yang-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types"; + prefix "yang"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: + WG List: + + WG Chair: David Kessens + + + WG Chair: Juergen Schoenwaelder + + + Editor: Juergen Schoenwaelder + "; + + description + "This module contains a collection of generally useful derived + YANG data types. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - yang-identifier + - hex-string + - uuid + - dotted-quad"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of counter and gauge types ***/ + + typedef counter32 { + type uint32; + description + "The counter32 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter32 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter32 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter32. + + In the value set and its semantics, this type is equivalent + to the Counter32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter32 { + type yang:counter32; + default "0"; + description + "The zero-based-counter32 type represents a counter32 + that has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter32 textual convention of the SMIv2."; + reference + "RFC 4502: Remote Network Monitoring Management Information + Base Version 2"; + } + + typedef counter64 { + type uint64; + description + "The counter64 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. 
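
A short worked example of the wrap behaviour described for the counter types above (not part of the module): a consumer that reads a counter32 twice within the minimum wrap time can recover the increment with modular arithmetic.

    def counter32_delta(old, new):
        # counter32 wraps from 2**32 - 1 back to 0, so the increment is the
        # difference modulo 2**32; valid only if at most one wrap occurred
        # between the two reads.
        return (new - old) % 2**32

    counter32_delta(4294967290, 5)   # -> 11, the counter wrapped once
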
If such + other times can occur, for example, the creation of + a schema node of type counter64 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter64 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter64. + + In the value set and its semantics, this type is equivalent + to the Counter64 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter64 { + type yang:counter64; + default "0"; + description + "The zero-based-counter64 type represents a counter64 that + has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter64 textual convention of the SMIv2."; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + typedef gauge32 { + type uint32; + description + "The gauge32 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^32-1 (4294967295 decimal), and + the minimum value cannot be smaller than 0. The value of + a gauge32 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge32 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the Gauge32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef gauge64 { + type uint64; + description + "The gauge64 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^64-1 (18446744073709551615), and + the minimum value cannot be smaller than 0. The value of + a gauge64 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge64 also decreases (increases). 
+ + In the value set and its semantics, this type is equivalent + to the CounterBasedGauge64 SMIv2 textual convention defined + in RFC 2856"; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + /*** collection of identifier-related types ***/ + + typedef object-identifier { + type string { + pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))' + + '(\.(0|([1-9]\d*)))*'; + } + description + "The object-identifier type represents administratively + assigned names in a registration-hierarchical-name tree. + + Values of this type are denoted as a sequence of numerical + non-negative sub-identifier values. Each sub-identifier + value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers + are separated by single dots and without any intermediate + whitespace. + + The ASN.1 standard restricts the value space of the first + sub-identifier to 0, 1, or 2. Furthermore, the value space + of the second sub-identifier is restricted to the range + 0 to 39 if the first sub-identifier is 0 or 1. Finally, + the ASN.1 standard requires that an object identifier + has always at least two sub-identifiers. The pattern + captures these restrictions. + + Although the number of sub-identifiers is not limited, + module designers should realize that there may be + implementations that stick with the SMIv2 limit of 128 + sub-identifiers. + + This type is a superset of the SMIv2 OBJECT IDENTIFIER type + since it is not restricted to 128 sub-identifiers. Hence, + this type SHOULD NOT be used to represent the SMIv2 OBJECT + IDENTIFIER type; the object-identifier-128 type SHOULD be + used instead."; + reference + "ISO9834-1: Information technology -- Open Systems + Interconnection -- Procedures for the operation of OSI + Registration Authorities: General procedures and top + arcs of the ASN.1 Object Identifier tree"; + } + + typedef object-identifier-128 { + type object-identifier { + pattern '\d*(\.\d*){1,127}'; + } + description + "This type represents object-identifiers restricted to 128 + sub-identifiers. + + In the value set and its semantics, this type is equivalent + to the OBJECT IDENTIFIER type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef yang-identifier { + type string { + length "1..max"; + pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*'; + pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*'; + } + description + "A YANG identifier string as defined by the 'identifier' + rule in Section 12 of RFC 6020. An identifier must + start with an alphabetic character or an underscore + followed by an arbitrary sequence of alphabetic or + numeric characters, underscores, hyphens, or dots. + + A YANG identifier MUST NOT start with any possible + combination of the lowercase or uppercase character + sequence 'xml'."; + reference + "RFC 6020: YANG - A Data Modeling Language for the Network + Configuration Protocol (NETCONF)"; + } + + /*** collection of types related to date and time***/ + + typedef date-and-time { + type string { + pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?' + + '(Z|[\+\-]\d{2}:\d{2})'; + } + description + "The date-and-time type is a profile of the ISO 8601 + standard for representation of dates and times using the + Gregorian calendar. The profile is defined by the + date-time production in Section 5.6 of RFC 3339. + + The date-and-time type is compatible with the dateTime XML + schema type with the following notable exceptions: + + (a) The date-and-time type does not allow negative years. 
+ + (b) The date-and-time time-offset -00:00 indicates an unknown + time zone (see RFC 3339) while -00:00 and +00:00 and Z + all represent the same time zone in dateTime. + + (c) The canonical format (see below) of data-and-time values + differs from the canonical format used by the dateTime XML + schema type, which requires all times to be in UTC using + the time-offset 'Z'. + + This type is not equivalent to the DateAndTime textual + convention of the SMIv2 since RFC 3339 uses a different + separator between full-date and full-time and provides + higher resolution of time-secfrac. + + The canonical format for date-and-time values with a known time + zone uses a numeric time zone offset that is calculated using + the device's configured known offset to UTC time. A change of + the device's offset to UTC time will cause date-and-time values + to change accordingly. Such changes might happen periodically + in case a server follows automatically daylight saving time + (DST) time zone offset changes. The canonical format for + date-and-time values with an unknown time zone (usually + referring to the notion of local time) uses the time-offset + -00:00."; + reference + "RFC 3339: Date and Time on the Internet: Timestamps + RFC 2579: Textual Conventions for SMIv2 + XSD-TYPES: XML Schema Part 2: Datatypes Second Edition"; + } + + typedef timeticks { + type uint32; + description + "The timeticks type represents a non-negative integer that + represents the time, modulo 2^32 (4294967296 decimal), in + hundredths of a second between two epochs. When a schema + node is defined that uses this type, the description of + the schema node identifies both of the reference epochs. + + In the value set and its semantics, this type is equivalent + to the TimeTicks type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef timestamp { + type yang:timeticks; + description + "The timestamp type represents the value of an associated + timeticks schema node at which a specific occurrence + happened. The specific occurrence must be defined in the + description of any schema node defined using this type. When + the specific occurrence occurred prior to the last time the + associated timeticks attribute was zero, then the timestamp + value is zero. Note that this requires all timestamp values + to be reset to zero when the value of the associated timeticks + attribute reaches 497+ days and wraps around to zero. + + The associated timeticks schema node must be specified + in the description of any schema node using this type. + + In the value set and its semantics, this type is equivalent + to the TimeStamp textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of generic address types ***/ + + typedef phys-address { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + + description + "Represents media- or physical-level addresses represented + as a sequence octets, each octet represented by two hexadecimal + numbers. Octets are separated by colons. The canonical + representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the PhysAddress textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + typedef mac-address { + type string { + pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'; + } + description + "The mac-address type represents an IEEE 802 MAC address. 
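
The pattern statements in the typedefs above map directly onto anchored regular expressions, since YANG (XSD) patterns must match the entire value. A minimal check of the phys-address pattern in Python, purely illustrative:

    import re

    # Pattern copied from the phys-address typedef above, with explicit anchors
    # added because Python's re does not anchor by default.
    PHYS_ADDRESS = re.compile(r'^([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?$')

    assert PHYS_ADDRESS.match('00:a0:c9:1e:6b:f6')
    assert not PHYS_ADDRESS.match('00-a0-c9-1e-6b-f6')   # wrong separator
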
+ The canonical representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the MacAddress textual convention of the SMIv2."; + reference + "IEEE 802: IEEE Standard for Local and Metropolitan Area + Networks: Overview and Architecture + RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of XML-specific types ***/ + + typedef xpath1.0 { + type string; + description + "This type represents an XPATH 1.0 expression. + + When a schema node is defined that uses this type, the + description of the schema node MUST specify the XPath + context in which the XPath expression is evaluated."; + reference + "XPATH: XML Path Language (XPath) Version 1.0"; + } + + /*** collection of string types ***/ + + typedef hex-string { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + description + "A hexadecimal string with octets represented as hex digits + separated by colons. The canonical representation uses + lowercase characters."; + } + + typedef uuid { + type string { + pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'; + } + description + "A Universally Unique IDentifier in the string representation + defined in RFC 4122. The canonical representation uses + lowercase characters. + + The following is an example of a UUID in string representation: + f81d4fae-7dec-11d0-a765-00a0c91e6bf6 + "; + reference + "RFC 4122: A Universally Unique IDentifier (UUID) URN + Namespace"; + } + + typedef dotted-quad { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'; + } + description + "An unsigned 32-bit number expressed in the dotted-quad + notation, i.e., four octets written as decimal numbers + and separated with the '.' (full stop) character."; + } +} diff --git a/tests/yang2spec/yang2spec/files/openconfig/interfaces/openconfig-interfaces.yang b/tests/yang2spec/yang2spec/files/openconfig/interfaces/openconfig-interfaces.yang new file mode 100644 index 0000000..4b791e3 --- /dev/null +++ b/tests/yang2spec/yang2spec/files/openconfig/interfaces/openconfig-interfaces.yang @@ -0,0 +1,992 @@ +module openconfig-interfaces { + + yang-version "1"; + + // namespace + namespace "http://openconfig.net/yang/interfaces"; + + prefix "oc-if"; + + // import some basic types + import ietf-interfaces { prefix ietf-if; } + import openconfig-yang-types { prefix oc-yang; } + import openconfig-types { prefix oc-types; } + import openconfig-extensions { prefix oc-ext; } + + // meta + organization "OpenConfig working group"; + + contact + "OpenConfig working group + netopenconfig@googlegroups.com"; + + description + "Model for managing network interfaces and subinterfaces. This + module also defines convenience types / groupings for other + models to create references to interfaces: + + base-interface-ref (type) - reference to a base interface + interface-ref (grouping) - container for reference to a + interface + subinterface + interface-ref-state (grouping) - container for read-only + (opstate) reference to interface + subinterface + + This model reuses data items defined in the IETF YANG model for + interfaces described by RFC 7223 with an alternate structure + (particularly for operational state data) and with + additional configuration items. + + Portions of this code were derived from IETF RFC 7223. + Please reproduce this note if possible. 
+ + IETF code is subject to the following copyright and license: + Copyright (c) IETF Trust and the persons identified as authors of + the code. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, is permitted pursuant to, and subject to the license + terms contained in, the Simplified BSD License set forth in + Section 4.c of the IETF Trust's Legal Provisions Relating + to IETF Documents (http://trustee.ietf.org/license-info)."; + + oc-ext:openconfig-version "2.3.0"; + + revision "2018-01-05" { + description + "Add logical loopback to interface."; + reference "2.3.0"; + } + + revision "2017-12-22" { + description + "Add IPv4 proxy ARP configuration."; + reference "2.2.0"; + } + + revision "2017-12-21" { + description + "Added IPv6 router advertisement configuration."; + reference "2.1.0"; + } + + revision "2017-07-14" { + description + "Added Ethernet/IP state data; Add dhcp-client; + migrate to OpenConfig types modules; Removed or + renamed opstate values"; + reference "2.0.0"; + } + + revision "2017-04-03" { + description + "Update copyright notice."; + reference "1.1.1"; + } + + revision "2016-12-22" { + description + "Fixes to Ethernet interfaces model"; + reference "1.1.0"; + } + + + // typedef statements + + typedef base-interface-ref { + type leafref { + path "/oc-if:interfaces/oc-if:interface/oc-if:name"; + } + description + "Reusable type for by-name reference to a base interface. + This type may be used in cases where ability to reference + a subinterface is not required."; + } + + typedef interface-id { + type string; + description + "User-defined identifier for an interface, generally used to + name a interface reference. The id can be arbitrary but a + useful convention is to use a combination of base interface + name and subinterface index."; + } + + // grouping statements + + grouping interface-ref-common { + description + "Reference leafrefs to interface / subinterface"; + + leaf interface { + type leafref { + path "/oc-if:interfaces/oc-if:interface/oc-if:name"; + } + description + "Reference to a base interface. If a reference to a + subinterface is required, this leaf must be specified + to indicate the base interface."; + } + + leaf subinterface { + type leafref { + path "/oc-if:interfaces/" + + "oc-if:interface[oc-if:name=current()/../interface]/" + + "oc-if:subinterfaces/oc-if:subinterface/oc-if:index"; + } + description + "Reference to a subinterface -- this requires the base + interface to be specified using the interface leaf in + this container. 
If only a reference to a base interface + is requuired, this leaf should not be set."; + } + } + + grouping interface-ref-state-container { + description + "Reusable opstate w/container for a reference to an + interface or subinterface"; + + container state { + config false; + description + "Operational state for interface-ref"; + + uses interface-ref-common; + } + } + + grouping interface-ref { + description + "Reusable definition for a reference to an interface or + subinterface"; + + container interface-ref { + description + "Reference to an interface or subinterface"; + + container config { + description + "Configured reference to interface / subinterface"; + + uses interface-ref-common; + } + + uses interface-ref-state-container; + } + } + + grouping interface-ref-state { + description + "Reusable opstate w/container for a reference to an + interface or subinterface"; + + container interface-ref { + description + "Reference to an interface or subinterface"; + + uses interface-ref-state-container; + } + } + + grouping base-interface-ref-state { + description + "Reusable opstate w/container for a reference to a + base interface (no subinterface)."; + + container state { + config false; + description + "Operational state for base interface reference"; + + leaf interface { + type base-interface-ref; + description + "Reference to a base interface."; + } + } + } + + + grouping interface-common-config { + description + "Configuration data data nodes common to physical interfaces + and subinterfaces"; + + leaf description { + type string; + description + "A textual description of the interface. + + A server implementation MAY map this leaf to the ifAlias + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifAlias. The definition of + such a mechanism is outside the scope of this document. + + Since ifAlias is defined to be stored in non-volatile + storage, the MIB implementation MUST map ifAlias to the + value of 'description' in the persistently stored + datastore. + + Specifically, if the device supports ':startup', when + ifAlias is read the device MUST return the value of + 'description' in the 'startup' datastore, and when it is + written, it MUST be written to the 'running' and 'startup' + datastores. Note that it is up to the implementation to + + decide whether to modify this single leaf in 'startup' or + perform an implicit copy-config from 'running' to + 'startup'. + + If the device does not support ':startup', ifAlias MUST + be mapped to the 'description' leaf in the 'running' + datastore."; + reference + "RFC 2863: The Interfaces Group MIB - ifAlias"; + } + + leaf enabled { + type boolean; + default "true"; + description + "This leaf contains the configured, desired state of the + interface. + + Systems that implement the IF-MIB use the value of this + leaf in the 'running' datastore to set + IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry + has been initialized, as described in RFC 2863. + + Changes in this leaf in the 'running' datastore are + reflected in ifAdminStatus, but if ifAdminStatus is + changed over SNMP, this leaf is not affected."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + } + + grouping interface-phys-config { + description + "Configuration data for physical interfaces"; + + leaf name { + type string; + description + "The name of the interface. 
+ + A device MAY restrict the allowed values for this leaf, + possibly depending on the type of the interface. + For system-controlled interfaces, this leaf is the + device-specific name of the interface. The 'config false' + list interfaces/interface[name]/state contains the currently + existing interfaces on the device. + + If a client tries to create configuration for a + system-controlled interface that is not present in the + corresponding state list, the server MAY reject + the request if the implementation does not support + pre-provisioning of interfaces or if the name refers to + an interface that can never exist in the system. A + NETCONF server MUST reply with an rpc-error with the + error-tag 'invalid-value' in this case. + + The IETF model in RFC 7223 provides YANG features for the + following (i.e., pre-provisioning and arbitrary-names), + however they are omitted here: + + If the device supports pre-provisioning of interface + configuration, the 'pre-provisioning' feature is + advertised. + + If the device allows arbitrarily named user-controlled + interfaces, the 'arbitrary-names' feature is advertised. + + When a configured user-controlled interface is created by + the system, it is instantiated with the same name in the + /interfaces/interface[name]/state list."; + } + + leaf type { + type identityref { + base ietf-if:interface-type; + } + mandatory true; + description + "The type of the interface. + + When an interface entry is created, a server MAY + initialize the type leaf with a valid value, e.g., if it + is possible to derive the type from the name of the + interface. + + If a client tries to set the type of an interface to a + value that can never be used by the system, e.g., if the + type is not supported or if the type does not match the + name of the interface, the server MUST reject the request. + A NETCONF server MUST reply with an rpc-error with the + error-tag 'invalid-value' in this case."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf mtu { + type uint16; + description + "Set the max transmission unit size in octets + for the physical interface. If this is not set, the mtu is + set to the operational default -- e.g., 1514 bytes on an + Ethernet interface."; + } + + leaf loopback-mode { + type boolean; + default false; + description + "When set to true, the interface is logically looped back, + such that packets that are forwarded via the interface + are received on the same interface."; + } + + uses interface-common-config; + } + + grouping interface-phys-holdtime-config { + description + "Configuration data for interface hold-time settings -- + applies to physical interfaces."; + + leaf up { + type uint32; + units milliseconds; + default 0; + description + "Dampens advertisement when the interface + transitions from down to up. A zero value means dampening + is turned off, i.e., immediate notification."; + } + + leaf down { + type uint32; + units milliseconds; + default 0; + description + "Dampens advertisement when the interface transitions from + up to down. 
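
Tying this back to the lookup plugin: a plain uint16 leaf such as mtu above only needs an integer bounds check when input configuration is validated against the generated rules spec. A hedged sketch with a hypothetical helper name, not the plugin's actual code:

    def check_uint16(value):
        # uint16 in YANG is the unsigned 16-bit range 0..65535.
        if not isinstance(value, int) or not 0 <= value <= 65535:
            raise ValueError("value %r outside uint16 range 0..65535" % value)
        return value

    check_uint16(1514)   # the operational Ethernet default mentioned above
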
A zero value means dampening is turned off, + i.e., immediate notification."; + } + } + + grouping interface-phys-holdtime-state { + description + "Operational state data for interface hold-time."; + } + + grouping interface-phys-holdtime-top { + description + "Top-level grouping for setting link transition + dampening on physical and other types of interfaces."; + + container hold-time { + description + "Top-level container for hold-time settings to enable + dampening advertisements of interface transitions."; + + container config { + description + "Configuration data for interface hold-time settings."; + + uses interface-phys-holdtime-config; + } + + container state { + + config false; + + description + "Operational state data for interface hold-time."; + + uses interface-phys-holdtime-config; + uses interface-phys-holdtime-state; + } + } + } + + grouping interface-common-state { + description + "Operational state data (in addition to intended configuration) + at the global level for this interface"; + + leaf ifindex { + type uint32; + description + "System assigned number for each interface. Corresponds to + ifIndex object in SNMP Interface MIB"; + reference + "RFC 2863 - The Interfaces Group MIB"; + } + + leaf admin-status { + type enumeration { + enum UP { + description + "Ready to pass packets."; + } + enum DOWN { + description + "Not ready to pass packets and not in some test mode."; + } + enum TESTING { + //TODO: This is generally not supported as a configured + //admin state, though it's in the standard interfaces MIB. + //Consider removing it. + description + "In some test mode."; + } + } + //TODO:consider converting to an identity to have the + //flexibility to remove some values defined by RFC 7223 that + //are not used or not implemented consistently. + mandatory true; + description + "The desired state of the interface. In RFC 7223 this leaf + has the same read semantics as ifAdminStatus. Here, it + reflects the administrative state as set by enabling or + disabling the interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum UP { + value 1; + description + "Ready to pass packets."; + } + enum DOWN { + value 2; + description + "The interface does not pass any packets."; + } + enum TESTING { + value 3; + description + "In some test mode. No operational packets can + be passed."; + } + enum UNKNOWN { + value 4; + description + "Status cannot be determined for some reason."; + } + enum DORMANT { + value 5; + description + "Waiting for some external event."; + } + enum NOT_PRESENT { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum LOWER_LAYER_DOWN { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + //TODO:consider converting to an identity to have the + //flexibility to remove some values defined by RFC 7223 that + //are not used or not implemented consistently. + mandatory true; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type oc-types:timeticks64; + units nanoseconds; + description + "This timestamp indicates the time of the last state change + of the interface (e.g., up-to-down transition). This + corresponds to the ifLastChange object in the standard + interface MIB. 
+ + The value is the timestamp in nanoseconds relative to + the Unix Epoch (Jan 1, 1970 00:00:00 UTC)."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + } + + + grouping interface-counters-state { + description + "Operational state representing interface counters + and statistics."; + + //TODO: we may need to break this list of counters into those + //that would appear for physical vs. subinterface or logical + //interfaces. For now, just replicating the full stats + //grouping to both interface and subinterface. + + container counters { + description + "A collection of interface-related statistics objects."; + + leaf in-octets { + type oc-yang:counter64; + description + "The total number of octets received on the interface, + including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type oc-yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type oc-yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type oc-yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type oc-yang:counter64; + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + + + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type oc-yang:counter64; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. 
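
The last-change leaf above reports its timestamp in nanoseconds relative to the Unix epoch; converting such a value to a readable UTC time in Python is a one-liner (illustrative only, not part of the model or the plugin):

    from datetime import datetime, timezone

    last_change_ns = 1525168800000000000   # example timeticks64 value
    print(datetime.fromtimestamp(last_change_ns / 1e9, tz=timezone.utc))
    # -> 2018-05-01 10:00:00+00:00
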
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type oc-yang:counter64; + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf in-fcs-errors { + type oc-yang:counter64; + description + "Number of received packets which had errors in the + frame check sequence (FCS), i.e., framing errors. + + Discontinuities in the value of this counter can occur + when the device is re-initialization as indicated by the + value of 'last-clear'."; + } + + leaf out-octets { + type oc-yang:counter64; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type oc-yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted, and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type oc-yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted, and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + + leaf out-multicast-pkts { + type oc-yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted, and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type oc-yang:counter64; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. One possible reason + for discarding such a packet could be to free up buffer + space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type oc-yang:counter64; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system, and at + other times as indicated by the value of + 'last-clear'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + + leaf carrier-transitions { + type oc-yang:counter64; + description + "Number of times the interface state has transitioned + between up and down since the time the device restarted + or the last-clear time, whichever is most recent."; + } + + leaf last-clear { + type oc-types:timeticks64; + units nanoseconds; + description + "Timestamp of the last time the interface counters were + cleared. + + The value is the timestamp in nanoseconds relative to + the Unix Epoch (Jan 1, 1970 00:00:00 UTC)."; + } + } + } + + // data definition statements + + grouping sub-unnumbered-config { + description + "Configuration data for unnumbered subinterfaces"; + + leaf enabled { + type boolean; + default false; + description + "Indicates that the subinterface is unnumbered. By default + the subinterface is numbered, i.e., expected to have an + IP address configuration."; + } + } + + grouping sub-unnumbered-state { + description + "Operational state data unnumbered subinterfaces"; + } + + grouping sub-unnumbered-top { + description + "Top-level grouping unnumbered subinterfaces"; + + container unnumbered { + description + "Top-level container for setting unnumbered interfaces. + Includes reference the interface that provides the + address information"; + + container config { + description + "Configuration data for unnumbered interface"; + + uses sub-unnumbered-config; + } + + container state { + + config false; + + description + "Operational state data for unnumbered interfaces"; + + uses sub-unnumbered-config; + uses sub-unnumbered-state; + } + + uses oc-if:interface-ref; + } + } + + grouping subinterfaces-config { + description + "Configuration data for subinterfaces"; + + leaf index { + type uint32; + default 0; + description + "The index of the subinterface, or logical interface number. 
+ On systems with no support for subinterfaces, or not using + subinterfaces, this value should default to 0, i.e., the + default subinterface."; + } + + uses interface-common-config; + + } + + grouping subinterfaces-state { + description + "Operational state data for subinterfaces"; + + leaf name { + type string; + description + "The system-assigned name for the sub-interface. This MAY + be a combination of the base interface name and the + subinterface index, or some other convention used by the + system."; + } + + uses interface-common-state; + uses interface-counters-state; + } + + grouping subinterfaces-top { + description + "Subinterface data for logical interfaces associated with a + given interface"; + + container subinterfaces { + description + "Enclosing container for the list of subinterfaces associated + with a physical interface"; + + list subinterface { + key "index"; + + description + "The list of subinterfaces (logical interfaces) associated + with a physical interface"; + + leaf index { + type leafref { + path "../config/index"; + } + description + "The index number of the subinterface -- used to address + the logical interface"; + } + + container config { + description + "Configurable items at the subinterface level"; + + uses subinterfaces-config; + } + + container state { + + config false; + description + "Operational state data for logical interfaces"; + + uses subinterfaces-config; + uses subinterfaces-state; + } + } + } + } + + grouping interfaces-top { + description + "Top-level grouping for interface configuration and + operational state data"; + + container interfaces { + description + "Top level container for interfaces, including configuration + and state data."; + + + list interface { + key "name"; + + description + "The list of named interfaces on the device."; + + leaf name { + type leafref { + path "../config/name"; + } + description + "References the configured name of the interface"; + //TODO: need to consider whether this should actually + //reference the name in the state subtree, which + //presumably would be the system-assigned name, or the + //configured name. Points to the config/name now + //because of YANG 1.0 limitation that the list + //key must have the same "config" as the list, and + //also can't point to a non-config node. 
+ } + + container config { + description + "Configurable items at the global, physical interface + level"; + + uses interface-phys-config; + } + + container state { + + config false; + description + "Operational state data at the global interface level"; + + uses interface-phys-config; + uses interface-common-state; + uses interface-counters-state; + } + + uses interface-phys-holdtime-top; + uses subinterfaces-top; + } + } + } + + uses interfaces-top; + + +} diff --git a/tests/yang2spec/yang2spec/files/openconfig/openconfig-extensions.yang b/tests/yang2spec/yang2spec/files/openconfig/openconfig-extensions.yang new file mode 100644 index 0000000..f39ecf6 --- /dev/null +++ b/tests/yang2spec/yang2spec/files/openconfig/openconfig-extensions.yang @@ -0,0 +1,91 @@ +module openconfig-extensions { + + yang-version "1"; + + // namespace + namespace "http://openconfig.net/yang/openconfig-ext"; + + prefix "oc-ext"; + + // meta + organization "OpenConfig working group"; + + contact + "OpenConfig working group + www.openconfig.net"; + + description + "This module provides extensions to the YANG language to allow + OpenConfig specific functionality and meta-data to be defined."; + + revision "2017-04-11" { + description + "rename password type to 'hashed' and clarify description"; + reference "0.3.0"; + } + + revision "2017-01-29" { + description + "Added extension for annotating encrypted values."; + reference "0.2.0"; + } + + revision "2015-10-09" { + description + "Initial OpenConfig public release"; + reference "0.1.0"; + } + + + // extension statements + extension openconfig-version { + argument "semver" { + yin-element false; + } + description + "The OpenConfig version number for the module. This is + expressed as a semantic version number of the form: + x.y.z + where: + * x corresponds to the major version, + * y corresponds to a minor version, + * z corresponds to a patch version. + This version corresponds to the model file within which it is + defined, and does not cover the whole set of OpenConfig models. + Where several modules are used to build up a single block of + functionality, the same module version is specified across each + file that makes up the module. + + A major version number of 0 indicates that this model is still + in development (whether within OpenConfig or with industry + partners), and is potentially subject to change. + + Following a release of major version 1, all modules will + increment major revision number where backwards incompatible + changes to the model are made. + + The minor version is changed when features are added to the + model that do not impact current clients use of the model. + + The patch-level version is incremented when non-feature changes + (such as bugfixes or clarifications to human-readable + descriptions that do not impact model functionality) are made + that maintain backwards compatibility. + + The version number is stored in the module meta-data."; + } + + extension openconfig-hashed-value { + description + "This extension provides an annotation on schema nodes to + indicate that the corresponding value should be stored and + reported in hashed form. + + Hash algorithms are by definition not reversible. Clients + reading the configuration or applied configuration for the node + should expect to receive only the hashed value. Values written + in cleartext will be hashed. 
This annotation may be used on + nodes such as secure passwords in which the device never reports + a cleartext value, even if the input is provided as cleartext."; + } +} diff --git a/tests/yang2spec/yang2spec/files/openconfig/types/openconfig-types.yang b/tests/yang2spec/yang2spec/files/openconfig/types/openconfig-types.yang new file mode 100644 index 0000000..978c78e --- /dev/null +++ b/tests/yang2spec/yang2spec/files/openconfig/types/openconfig-types.yang @@ -0,0 +1,401 @@ +module openconfig-types { + yang-version "1"; + + namespace "http://openconfig.net/yang/openconfig-types"; + + prefix "oc-types"; + + // import statements + import openconfig-extensions { prefix oc-ext; } + + // meta + organization + "OpenConfig working group"; + + contact + "OpenConfig working group + netopenconfig@googlegroups.com"; + + description + "This module contains a set of general type definitions that + are used across OpenConfig models. It can be imported by modules + that make use of these types."; + + oc-ext:openconfig-version "0.4.0"; + + revision "2018-01-16" { + description + "Add interval to min/max/avg stats; add percentage stat"; + reference "0.4.0"; + } + + revision "2017-08-16" { + description + "Apply fix for ieetfloat32 length parameter"; + reference "0.3.3"; + } + + revision "2017-01-13" { + description + "Add ADDRESS_FAMILY identity"; + reference "0.3.2"; + } + + revision "2016-11-14" { + description + "Correct length of ieeefloat32"; + reference "0.3.1"; + } + + revision "2016-11-11" { + description + "Additional types - ieeefloat32 and routing-password"; + reference "0.3.0"; + } + + revision "2016-05-31" { + description + "OpenConfig public release"; + reference "0.2.0"; + } + + typedef percentage { + type uint8 { + range "0..100"; + } + description + "Integer indicating a percentage value"; + } + + typedef std-regexp { + type string; + description + "This type definition is a placeholder for a standard + definition of a regular expression that can be utilised in + OpenConfig models. Further discussion is required to + consider the type of regular expressions that are to be + supported. An initial proposal is POSIX compatible."; + } + + typedef timeticks64 { + type uint64; + description + "This type is based on the timeticks type defined in + RFC 6991, but with 64-bit width. It represents the time, + modulo 2^64, in hundredths of a second between two epochs."; + reference + "RFC 6991 - Common YANG Data Types"; + } + + typedef ieeefloat32 { + type binary { + length "4"; + } + description + "An IEEE 32-bit floating point number. The format of this number + is of the form: + 1-bit sign + 8-bit exponent + 23-bit fraction + The floating point value is calculated using: + (-1)**S * 2**(Exponent-127) * (1+Fraction)"; + } + + typedef routing-password { + type string; + description + "This type is indicative of a password that is used within + a routing protocol which can be returned in plain text to the + NMS by the local system. Such passwords are typically stored + as encrypted strings. Since the encryption used is generally + well known, it is possible to extract the original value from + the string - and hence this format is not considered secure. + Leaves specified with this type should not be modified by + the system, and should be returned to the end-user in plain + text. This type exists to differentiate passwords, which + may be sensitive, from other string leaves. 
It could, for + example, be used by the NMS to censor this data when + viewed by particular users."; + } + + typedef stat-interval { + type uint64; + units nanoseconds; + description + "A time interval over which a set of statistics is computed. + A common usage is to report the interval over which + avg/min/max stats are computed and reported."; + } + + grouping stat-interval-state { + description + "Reusable leaf definition for stats computation interval"; + + leaf interval { + type oc-types:stat-interval; + description + "The time interval over which the min/max/average statistics + are computed by the system."; + } + } + + grouping avg-min-max-stats-precision1 { + description + "Common nodes for recording average, minimum, and + maximum values for a statistic. These values all have + fraction-digits set to 1."; + + leaf avg { + type decimal64 { + fraction-digits 1; + } + description + "The arithmetic mean value of the statistic over the + sampling period."; + } + + leaf min { + type decimal64 { + fraction-digits 1; + } + description + "The minimum value of the statistic over the sampling + period"; + } + + leaf max { + type decimal64 { + fraction-digits 1; + } + description + "The maximum value of the statitic over the sampling + period"; + } + + uses stat-interval-state; + } + + grouping avg-min-max-instant-stats-precision1 { + description + "Common grouping for recording an instantaneous statistic value + in addition to avg-min-max stats"; + + leaf instant { + type decimal64 { + fraction-digits 1; + } + description + "The instantaneous value of the statistic."; + } + + uses avg-min-max-stats-precision1; + } + + grouping avg-min-max-instant-stats-precision2-dB { + description + "Common grouping for recording dB values with 2 decimal + precision. Values include the instantaneous, average, + minimum, and maximum statistics"; + + leaf instant { + type decimal64 { + fraction-digits 2; + } + units dB; + description + "The instantaneous value of the statistic."; + } + + leaf avg { + type decimal64 { + fraction-digits 2; + } + units dB; + description + "The arithmetic mean value of the statistic over the + sampling period."; + } + + leaf min { + type decimal64 { + fraction-digits 2; + } + units dB; + description + "The minimum value of the statistic over the sampling + period"; + } + + leaf max { + type decimal64 { + fraction-digits 2; + } + units dB; + description + "The maximum value of the statistic over the sampling + period"; + } + + uses stat-interval-state; + } + + grouping avg-min-max-instant-stats-precision2-dBm { + description + "Common grouping for recording dBm values with 2 decimal + precision. Values include the instantaneous, average, + minimum, and maximum statistics"; + + leaf instant { + type decimal64 { + fraction-digits 2; + } + units dBm; + description + "The instantaneous value of the statistic."; + } + + leaf avg { + type decimal64 { + fraction-digits 2; + } + units dBm; + description + "The arithmetic mean value of the statistic over the + sampling period."; + } + + leaf min { + type decimal64 { + fraction-digits 2; + } + units dBm; + description + "The minimum value of the statistic over the sampling + period"; + } + + leaf max { + type decimal64 { + fraction-digits 2; + } + units dBm; + description + "The maximum value of the statistic over the sampling + period"; + } + + uses stat-interval-state; + } + + grouping avg-min-max-instant-stats-precision2-mA { + description + "Common grouping for recording mA values with 2 decimal + precision. 
Values include the instantaneous, average, + minimum, and maximum statistics"; + + leaf instant { + type decimal64 { + fraction-digits 2; + } + units mA; + description + "The instantaneous value of the statistic."; + } + + leaf avg { + type decimal64 { + fraction-digits 2; + } + units mA; + description + "The arithmetic mean value of the statistic over the + sampling period."; + } + + leaf min { + type decimal64 { + fraction-digits 2; + } + units mA; + description + "The minimum value of the statistic over the sampling + period"; + } + + leaf max { + type decimal64 { + fraction-digits 2; + } + units mA; + description + "The maximum value of the statistic over the sampling + period"; + } + + uses stat-interval-state; + } + + grouping avg-min-max-instant-stats-pct { + description + "Common grouping for percentage statistics."; + + leaf instant { + type oc-types:percentage; + description + "The instantaneous percentage value."; + } + + leaf avg { + type oc-types:percentage; + description + "The arithmetic mean value of the percentage measure of the + statistic over the sampling period."; + } + + leaf min { + type oc-types:percentage; + description + "The minimum value of the percentage measure of the + statistic over the sampling period"; + } + + leaf max { + type oc-types:percentage; + description + "The maximum value of the percentage measure of the + statistic over the sampling period"; + } + + uses stat-interval-state; + } + + identity ADDRESS_FAMILY { + description + "A base identity for all address families"; + } + + identity IPV4 { + base ADDRESS_FAMILY; + description + "The IPv4 address family"; + } + + identity IPV6 { + base ADDRESS_FAMILY; + description + "The IPv6 address family"; + } + + identity MPLS { + base ADDRESS_FAMILY; + description + "The MPLS address family"; + } + + identity L2_ETHERNET { + base ADDRESS_FAMILY; + description + "The 802.3 Ethernet address family"; + } + +} diff --git a/tests/yang2spec/yang2spec/files/openconfig/types/openconfig-yang-types.yang b/tests/yang2spec/yang2spec/files/openconfig/types/openconfig-yang-types.yang new file mode 100644 index 0000000..749fac4 --- /dev/null +++ b/tests/yang2spec/yang2spec/files/openconfig/types/openconfig-yang-types.yang @@ -0,0 +1,156 @@ +module openconfig-yang-types { + + yang-version "1"; + namespace "http://openconfig.net/yang/types/yang"; + prefix "oc-yang"; + + import openconfig-extensions { prefix "oc-ext"; } + + organization + "OpenConfig working group"; + + contact + "OpenConfig working group + www.openconfig.net"; + + description + "This module contains a set of extension types to the + YANG builtin types that are used across multiple + OpenConfig models. + + Portions of this code were derived from IETF RFC 6021. + Please reproduce this note if possible. + + IETF code is subject to the following copyright and license: + Copyright (c) IETF Trust and the persons identified as authors of + the code. + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, is permitted pursuant to, and subject to the license + terms contained in, the Simplified BSD License set forth in + Section 4.c of the IETF Trust's Legal Provisions Relating + to IETF Documents (http://trustee.ietf.org/license-info)."; + + oc-ext:openconfig-version "0.1.2"; + + revision 2017-07-30 { + description + "Fixed unprintable character"; + reference "0.1.2"; + } + + revision 2017-04-03 { + description + "Update copyright notice."; + reference "0.1.1"; + } + + revision 2017-01-26 { + description + "Initial module for inet types"; + reference "0.1.0"; + } + + typedef dotted-quad { + type string { + pattern '^(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|' + + '25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4]' + + '[0-9]|25[0-5])$'; + } + description + "An unsigned 32-bit integer expressed as a dotted quad. The + format is four octets written as decimal numbers separated + with a period character."; + } + + typedef hex-string { + type string { + pattern '^[0-9a-fA-F]*$'; + } + description + "A string consisting of a hexadecimal characters."; + } + + typedef counter32 { + type uint32; + description + + "A 32-bit counter. A counter value is a monotonically increasing + value which is used to express a count of a number of + occurrences of a particular event or entity. When the counter + reaches its maximum value, in this case 2^32-1, it wraps to 0. + + Discontinuities in the counter are generally triggered only when + the counter is reset to zero."; + } + + typedef counter64 { + type uint64; + description + + "A 64-bit counter. A counter value is a monotonically increasing + value which is used to express a count of a number of + occurrences of a particular event or entity. When a counter64 + reaches its maximum value, 2^64-1, it loops to zero. + Discontinuities in a counter are generally triggered only when + the counter is reset to zero, through operator or system + intervention."; + } + + typedef date-and-time { + type string { + pattern + '^[0-9]{4}\-[0-9]{2}\-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}' + + '(\.[0-9]+)?Z[+-][0-9]{2}:[0-9]{2}$'; + } + description + "A date and time, expressed in the format described in RFC3339. + That is to say: + + YYYY-MM-DDTHH:MM:SSZ+-hh:mm + + where YYYY is the year, MM is the month expressed as a two-digit + month (zero padding if required), DD is the day of the month, + expressed as a two digit value. T is the literal character 'T', + HH is the hour of the day expressed as a two digit number, using + the 24-hour clock, MM is the minute of the hour expressed as a + two digit number. Z is the literal character 'Z', followed by a + timezone offset expressed in hours (hh) and minutes (mm), both + expressed as two digit numbers. The time offset is specified as + a positive or negative offset to UTC using the '+' or '-' + character preceding the offset. + + Optionally, fractional seconds can be expressed after the minute + of the hour as a decimal number of unspecified precision + reflecting fractions of a second."; + reference + "RFC3339 - Date and Time on the Internet: Timestamps"; + } + + typedef gauge64 { + type uint64; + description + "A gauge value may increase or decrease - and reflects a value + at a particular point in time. 
If the value of the variable + being modeled using the gauge exceeds its maximum - 2^64-1 in + this case - the gauge is set to its maximum value."; + } + + typedef phys-address { + type string { + pattern '^([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?$'; + } + description + "A physical layer address, expressed as a series of pairs of + hexadecimal digits."; + } + + typedef mac-address { + type string { + pattern '^[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}$'; + } + description + "An IEEE 802 MAC address"; + } +} diff --git a/tests/yang2spec/yang2spec/meta/main.yaml b/tests/yang2spec/yang2spec/meta/main.yaml new file mode 100644 index 0000000..d64f158 --- /dev/null +++ b/tests/yang2spec/yang2spec/meta/main.yaml @@ -0,0 +1,3 @@ +--- +dependencies: + - ../../../network-engine diff --git a/tests/yang2spec/yang2spec/tasks/basic.yaml b/tests/yang2spec/yang2spec/tasks/basic.yaml new file mode 100644 index 0000000..080aa41 --- /dev/null +++ b/tests/yang2spec/yang2spec/tasks/basic.yaml @@ -0,0 +1,48 @@ +--- +- name: Convert interface yang module to spec + set_fact: + interface_spec: "{{ lookup('yang2spec', yang_file, search_path=search_path) }}" + +- assert: + that: + - "'config_schema' in interface_spec" + - "interface_spec['config_schema']['interfaces']['interface']['config'][0]['loopback_mode'] == false" + - "interface_spec['config_schema']['interfaces']['interface']['config'][0]['enabled'] == true" + - "interface_spec['config_schema']['interfaces']['interface']['config'][0]['description'] == None" + - "interface_spec['config_schema']['interfaces']['interface']['config'][0]['mtu'] == None" + - "interface_spec['config_schema']['interfaces']['interface']['config'][0]['name'] == None" + - "interface_spec['config_schema']['interfaces']['interface']['config'][0]['type'] == None" + - "interface_spec['config_schema']['interfaces']['interface']['hold_time'][0]['config']['down'] == 0" + - "interface_spec['config_schema']['interfaces']['interface']['hold_time'][0]['config']['up'] == 0" + - "interface_spec['config_schema']['interfaces']['interface']['subinterfaces'][0]['subinterface']['config'][0]['index'] == 0" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['config']['suboptions']['description']['description'] is defined" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['config']['suboptions']['description']['type'] == 'str'" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['config']['suboptions']['enabled']['default'] == 'true'" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['config']['suboptions']['enabled']['type'] == 'boolean'" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['config']['suboptions']['mtu']['restriction']['int_size'] == 16" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['config']['suboptions']['mtu']['restriction']['max'] == 65535" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['config']['suboptions']['mtu']['restriction']['min'] == 0" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['config']['suboptions']['mtu']['type'] == 'int'" + - "interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['suboptions_elements'] == 'dict'" + - 
"interface_spec['spec']['options']['interfaces']['suboptions']['interface']['suboptions']['suboptions_type'] == 'list'" + +- name: Wrong yang module path + set_fact: + interface_spec: "{{ lookup('yang2spec', '/test', search_path=search_path) }}" + ignore_errors: True + register: result + +- assert: + that: + - "'invalid file path' in result.msg" + + +- name: Wrong module search path + set_fact: + interface_spec: "{{ lookup('yang2spec', yang_file, search_path='/test') }}" + ignore_errors: True + register: result + +- assert: + that: + - "'invalid directory path' in result.msg" diff --git a/tests/yang2spec/yang2spec/tasks/main.yaml b/tests/yang2spec/yang2spec/tasks/main.yaml new file mode 100644 index 0000000..bc8cc63 --- /dev/null +++ b/tests/yang2spec/yang2spec/tasks/main.yaml @@ -0,0 +1,3 @@ +--- +- name: yang2spec test + import_tasks: basic.yaml From f43a70ab58bc60272989701367185542d781151e Mon Sep 17 00:00:00 2001 From: "Ganesh B. Nalawade" Date: Thu, 31 May 2018 16:47:47 +0530 Subject: [PATCH 5/8] Update readme --- README.md | 2 ++ docs/user_guide/README.md | 1 + tests/yang2spec/test.retry | 1 - 3 files changed, 3 insertions(+), 1 deletion(-) delete mode 100644 tests/yang2spec/test.retry diff --git a/README.md b/README.md index 6f35f10..b8bdfe2 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,8 @@ The following is a list of plugins that are provided by this role. * `json_template` [[source]](https://github.com/ansible-network/network-engine/blob/devel/lookup_plugins/json_template.py) * `network_template` [[source]](https://github.com/ansible-network/network-engine/blob/devel/lookup_plugins/network_template.py) * `netcfg_diff` [[source]](https://github.com/ansible-network/network-engine/blob/devel/lookup_plugins/netcfg_diff.py) +* `yang2spec` [[source]](https://github.com/ansible-network/network-engine/blob/devel/lookup_plugins/yang2spec.py) + ### Filter diff --git a/docs/user_guide/README.md b/docs/user_guide/README.md index 59c9d21..023a4a7 100644 --- a/docs/user_guide/README.md +++ b/docs/user_guide/README.md @@ -31,6 +31,7 @@ Additional Resources * [README](https://galaxy.ansible.com/ansible-network/network-engine/#readme) * [command_parser tests](https://github.com/ansible-network/network-engine/tree/devel/tests/command_parser) * [textfsm_parser tests](https://github.com/ansible-network/network-engine/tree/devel/tests/textfsm_parser) +* [yang2spec tests](https://github.com/ansible-network/network-engine/tree/devel/tests/yang2spec) * [Full changelog diff](https://github.com/ansible-network/network-engine/blob/devel/CHANGELOG.rst) Contributing and Reporting Feedback diff --git a/tests/yang2spec/test.retry b/tests/yang2spec/test.retry deleted file mode 100644 index 2fbb50c..0000000 --- a/tests/yang2spec/test.retry +++ /dev/null @@ -1 +0,0 @@ -localhost From ad02fa41fa421eb30b6032b1dc1c6066d901e98c Mon Sep 17 00:00:00 2001 From: "Ganesh B. Nalawade" Date: Wed, 6 Jun 2018 11:33:40 +0530 Subject: [PATCH 6/8] Import yang2spec testcase in main entry file --- tests/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test.yml b/tests/test.yml index f1da297..568e64c 100644 --- a/tests/test.yml +++ b/tests/test.yml @@ -9,3 +9,4 @@ - import_playbook: vlan_compress/test.yml - import_playbook: vlan_expand/test.yml - import_playbook: netcfg_diff/test.yml +- import_playbook: yang2spec/test.yml From 60a9275fbfb9757b4d799e0417772f346c588aa4 Mon Sep 17 00:00:00 2001 From: "Ganesh B. 
Nalawade" Date: Wed, 6 Jun 2018 11:58:23 +0530 Subject: [PATCH 7/8] Add pyang to test requirements --- test-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/test-requirements.txt b/test-requirements.txt index 3930480..b9b1d1b 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1 +1,2 @@ flake8 +pyang From eac486360994e7388cff9d6ecc4bb1ae2f8313d8 Mon Sep 17 00:00:00 2001 From: "Ganesh B. Nalawade" Date: Wed, 6 Jun 2018 17:33:44 +0530 Subject: [PATCH 8/8] Fix py3 CI failures --- lookup_plugins/yang2spec.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lookup_plugins/yang2spec.py b/lookup_plugins/yang2spec.py index 2166902..968b3f5 100644 --- a/lookup_plugins/yang2spec.py +++ b/lookup_plugins/yang2spec.py @@ -987,9 +987,7 @@ def get_children(ctx, fd, i_children, module, parent, path=str(), parent_descr = parent.search_one('description') if parent_descr is not None: - ansible_spec_header['description'] = parent_descr.arg.decode('utf8').encode('ascii', - 'ignore').strip().replace( - '\n', ' ') + ansible_spec_header['description'] = parent_descr.arg.strip().replace('\n\n', '\n') ansible_spec_header['short_description'] = ansible_spec_header['description'].split('.')[0] else: ansible_spec_header['description'] = "" @@ -1047,7 +1045,7 @@ def get_children(ctx, fd, i_children, module, parent, path=str(), spec['default'] = i['default'] if i.get('description'): - spec['description'] = i["description"].decode('utf-8').encode('ascii', 'ignore').replace('\n', ' ') + spec['description'] = i["description"] if keyval and i['yang_name'] in keyval: spec['required'] = True