implemented topological sort

Stasik0 2016-07-22 13:22:13 +02:00 committed by Stefan Profanter
parent d0efc73622
commit 6a91d84bcb
5 changed files with 184 additions and 119 deletions

View File

@ -79,32 +79,26 @@ if(UA_ENABLE_NODEMANAGEMENT)
endif()
if(UA_BUILD_EXAMPLES_NODESET_COMPILER)
if(BUILD_SHARED_LIBS)
message(FATAL_ERROR "The nodeset compiler currently requires static linking to access internal API")
endif()
if(BUILD_SHARED_LIBS)
message(FATAL_ERROR "The nodeset compiler currently requires static linking to access internal API")
endif()
# example information model from nodeset xml
add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/src_generated/nodeset.h ${PROJECT_BINARY_DIR}/src_generated/nodeset.c
PRE_BUILD
COMMAND ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/tools/pyUANamespace/generate_open62541CCode.py
-i ${PROJECT_SOURCE_DIR}/tools/pyUANamespace/NodeID_Blacklist_FullNS0.txt
--high-level-api
${PROJECT_SOURCE_DIR}/tools/schema/namespace0/Opc.Ua.NodeSet2.xml
${PROJECT_SOURCE_DIR}/examples/server_nodeset.xml
#${PROJECT_SOURCE_DIR}/examples/Opc.ISA95.NodeSet2.xml
#${PROJECT_SOURCE_DIR}/examples/Opc.Ua.AMLBaseTypes.NodeSet2.xml
#${PROJECT_SOURCE_DIR}/examples/Opc.Ua.AMLLibraries.NodeSet2.xml
#${PROJECT_SOURCE_DIR}/examples/Opc.Ua.Di.NodeSet2.xml
#${PROJECT_SOURCE_DIR}/examples/Opc.Ua.Adi.NodeSet2.xml
${PROJECT_BINARY_DIR}/src_generated/nodeset
DEPENDS ${PROJECT_SOURCE_DIR}/tools/pyUANamespace/generate_open62541CCode.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/open62541_MacroHelper.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/ua_builtin_types.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/ua_constants.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/ua_namespace.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/ua_node_types.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/NodeID_Blacklist_FullNS0.txt
${PROJECT_SOURCE_DIR}/examples/server_nodeset.xml)
# example information model from nodeset xml
add_custom_command(OUTPUT ${PROJECT_BINARY_DIR}/src_generated/nodeset.h ${PROJECT_BINARY_DIR}/src_generated/nodeset.c
PRE_BUILD
COMMAND ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/tools/pyUANamespace/generate_open62541CCode.py
-i ${PROJECT_SOURCE_DIR}/tools/pyUANamespace/NodeID_Blacklist_FullNS0.txt
${PROJECT_SOURCE_DIR}/tools/schema/namespace0/Opc.Ua.NodeSet2.xml
${PROJECT_SOURCE_DIR}/examples/server_nodeset.xml
${PROJECT_BINARY_DIR}/src_generated/nodeset
DEPENDS ${PROJECT_SOURCE_DIR}/tools/pyUANamespace/generate_open62541CCode.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/open62541_MacroHelper.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/ua_builtin_types.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/ua_constants.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/ua_namespace.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/ua_node_types.py
${PROJECT_SOURCE_DIR}/tools/pyUANamespace/NodeID_Blacklist_FullNS0.txt
${PROJECT_SOURCE_DIR}/examples/server_nodeset.xml)
add_executable(server_nodeset server_nodeset.c ${PROJECT_BINARY_DIR}/src_generated/nodeset.c $<TARGET_OBJECTS:open62541-object>)
target_link_libraries(server_nodeset ${LIBS})

View File

@ -21,23 +21,25 @@
###
from __future__ import print_function
from ua_namespace import *
import logging
import argparse
from os.path import basename
from ua_namespace import *
from open62541_XMLPreprocessor import open62541_XMLPreprocessor
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description="""Parse OPC UA NamespaceXML file(s) and create C code for generating nodes in open62541
description="""Parse OPC UA NodeSetXML file(s) and create C code for generating nodes in open62541
generate_open62541CCode.py will first read all XML files passed on the command line, then link and check the namespace. All nodes that fulfill the basic requirements will then be printed as C-Code intended to be included in the open62541 OPC UA Server that will initialize the corresponding namespace.""",
generate_open62541CCode.py will first read all XML files passed on the command
line, then link and check the nodeset. All nodes that fulfill the basic
requirements will then be printed as C-Code intended to be included in the
open62541 OPC UA Server that will initialize the corresponding nodeset.""",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('infiles',
metavar="<namespaceXML>",
metavar="<nodeSetXML>",
nargs='+',
type=argparse.FileType('r'),
help='Namespace XML file(s). Note that the last definition of a node encountered will be used and all prior definitions are discarded.')
help='NodeSet XML file(s). Note that the last definition of a node encountered will be used and all prior definitions are discarded.')
parser.add_argument('outputFile',
metavar='<outputFile>',
#type=argparse.FileType('w', 0),
@ -55,7 +57,7 @@ parser.add_argument('-b','--blacklist',
action='append',
dest="blacklistFiles",
default=[],
help='Loads a list of NodeIDs stored in blacklistFile (one NodeID per line). Any of the nodeIds encountered in this file will be removed from the namespace prior to compilation. Any references to these nodes will also be removed')
help='Loads a list of NodeIDs stored in blacklistFile (one NodeID per line). Any of the nodeIds encountered in this file will be removed from the nodeset prior to compilation. Any references to these nodes will also be removed')
parser.add_argument('-s','--suppress',
metavar="<attribute>",
action='append',
@ -71,34 +73,28 @@ parser.add_argument('-v','--verbose', action='count', help='Make the script more
args = parser.parse_args()
level = logging.CRITICAL
# Set up logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
verbosity = 0
if args.verbose:
verbosity = int(args.verbose)
if (verbosity==1):
level = logging.ERROR
logging.basicConfig(level=logging.ERROR)
elif (verbosity==2):
level = logging.WARNING
logging.basicConfig(level=logging.WARNING)
elif (verbosity==3):
level = logging.INFO
logging.basicConfig(level=logging.INFO)
elif (verbosity>=4):
level = logging.DEBUG
logging.basicConfig(level=level)
logger.setLevel(logging.INFO)
# Creating the header is tedious. We can skip the entire process if
# the header exists.
#if path.exists(argv[-1]+".c") or path.exists(argv[-1]+".h"):
# log(None, "File " + str(argv[-1]) + " does already exists.", LOG_LEVEL_INFO)
# log(None, "Header generation will be skipped. Delete the header and rerun this script if necessary.", LOG_LEVEL_INFO)
# exit(0)
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.CRITICAL)
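The verbosity handling above maps the count of -v flags to a logging level, defaulting to CRITICAL when the flag is absent. A compact standalone sketch of the same pattern (not the commit's code; the names are illustrative):

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=0,
                    help='Make the script more verbose')
args = parser.parse_args()

# -v -> ERROR, -vv -> WARNING, -vvv -> INFO, -vvvv and beyond -> DEBUG.
levels = [logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(args.verbose, len(levels) - 1)])
logger = logging.getLogger(__name__)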
# Open the output file
outfileh = open(args.outputFile+".h", r"w+")
outfilec = open(args.outputFile+".c", r"w+")
# Create a new namespace. Note that the namespace name is not significant.
# Create a new nodeset. The nodeset name is not significant.
ns = opcua_namespace("open62541")
# Clean up the XML files by removing duplicate namespaces and unwanted prefixes
@ -118,10 +114,7 @@ namespaceArrayNames = preProc.getUsedNamespaceArrayNames()
for key in namespaceArrayNames:
ns.addNamespace(key, namespaceArrayNames[key])
# Remove any temp files - they are not needed after the AST is created
preProc.removePreprocessedFiles()
# Remove blacklisted nodes from the namespace
# Remove blacklisted nodes from the nodeset
# Doing this now ensures that unlinkable pointers will be cleanly removed
# during sanitation.
for blacklist in args.blacklistFiles:
@ -134,7 +127,7 @@ for blacklist in args.blacklistFiles:
ns.removeNodeById(line)
blacklist.close()
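The loop above reads one NodeId string per line from each file passed via -b/--blacklist and removes the matching nodes, along with any references to them, from the nodeset before compilation. An illustrative blacklist file (these ids are hypothetical):

i=2255
ns=0;i=11715
ns=2;s=SkipThisNode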
# Link the references in the namespace
# Link the references in the nodeset
logger.info("Linking namespace nodes and references")
ns.linkOpenPointers()
@ -165,7 +158,7 @@ for ignore in args.ignoreFiles:
line = line.replace(" ","")
id = line.replace("\n","")
if ns.getNodeByIDString(id) == None:
logger.warn("Can't ignore node, Namespace does currently not contain a node with id " + str(id))
logger.warn("Can't ignore node, NodeSet does currently not contain a node with id " + str(id))
else:
ignoreNodes.append(ns.getNodeByIDString(id))
ignore.close()
@ -173,14 +166,14 @@ for ignore in args.ignoreFiles:
# Create the C Code
logger.info("Generating Header")
# Returns a tuple of (["Header","lines"],["Code","lines","generated"])
from os.path import basename
generatedCode = ns.printOpen62541Header(ignoreNodes, args.suppressedAttributes, outfilename=basename(args.outputFile), high_level_api=args.high_level_api)
generatedCode = ns.printOpen62541Header(ignoreNodes, args.suppressedAttributes, outfilename=basename(args.outputFile))
for line in generatedCode[0]:
outfileh.write(line+"\n")
print(line, end='\n', file=outfileh)
for line in generatedCode[1]:
outfilec.write(line+"\n")
print(line, end='\n', file=outfilec)
outfilec.close()
outfileh.close()
logger.info("Namespace generation code successfully printed")
logger.info("NodeSet generation code successfully printed")

View File

@ -37,9 +37,9 @@ class open62541_MacroHelper():
def getCreateExpandedNodeIDMacro(self, node):
if node.id().i != None:
return "UA_EXPANDEDNODEID_NUMERIC(%s, %s)" % (node.id().ns, node.id().i)
return "UA_EXPANDEDNODEID_NUMERIC(%s, %s)" % (str(node.id().ns),str(node.id().i))
elif node.id().s != None:
return "UA_EXPANDEDNODEID_STRING(%s, %s)" % (node.id().ns, node.id().s)
return "UA_EXPANDEDNODEID_STRING(%s, %s)" % (str(node.id().ns), node.id().s)
elif node.id().b != None:
logger.debug("NodeID Generation macro for bytestrings has not been implemented.")
return ""
@ -86,13 +86,15 @@ class open62541_MacroHelper():
symbolic_name = symbolic_name+"_"+str(extendedN)
defined_typealiases.append(symbolic_name)
return "#define UA_NS%sID_%s %s" % (node.id().ns, symbolic_name.upper(), node.id().i)
code.append("#define UA_NS%sID_%s %s" % (str(node.id().ns), symbolic_name.upper(),str(node.id().i)))
return code
def getCreateNodeIDMacro(self, node):
if node.id().i != None:
return "UA_NODEID_NUMERIC(%s, %s)" % (node.id().ns, node.id().i)
return "UA_NODEID_NUMERIC(%s, %s)" % (str(node.id().ns),str(node.id().i))
elif node.id().s != None:
return "UA_NODEID_STRING(%s, %s)" % (node.id().ns, node.id().s)
return "UA_NODEID_STRING(%s, %s)" % (str(node.id().ns), node.id().s)
elif node.id().b != None:
logger.debug("NodeID Generation macro for bytestrings has not been implemented.")
return ""
@ -107,6 +109,8 @@ class open62541_MacroHelper():
if reference.isForward():
code.append("UA_Server_addReference(server, %s, %s, %s, true);" % (self.getCreateNodeIDMacro(sourcenode), self.getCreateNodeIDMacro(reference.referenceType()), self.getCreateExpandedNodeIDMacro(reference.target())))
else:
code.append("UA_Server_addReference(server, %s, %s, %s, false);" % (self.getCreateNodeIDMacro(sourcenode), self.getCreateNodeIDMacro(reference.referenceType()), self.getCreateExpandedNodeIDMacro(reference.target())))
return code
def getCreateNodeNoBootstrap(self, node, parentNode, parentReference, unprintedNodes=[]):
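For illustration: a node with the numeric id ns=1;i=5001 makes getCreateNodeIDMacro return UA_NODEID_NUMERIC(1, 5001) and getCreateExpandedNodeIDMacro return UA_EXPANDEDNODEID_NUMERIC(1, 5001), while a string id such as ns=2;s=MyDevice yields UA_NODEID_STRING(2, MyDevice). A minimal standalone sketch of the same formatting (the NodeId stand-in below is hypothetical, not the project's class):

class FakeNodeId:
    """Stand-in for the object returned by node.id() in the helper above."""
    def __init__(self, ns, i=None, s=None):
        self.ns, self.i, self.s = ns, i, s

def create_node_id_macro(nid):
    # Numeric ids take precedence, then string ids; bytestrings are unsupported.
    if nid.i is not None:
        return "UA_NODEID_NUMERIC(%s, %s)" % (str(nid.ns), str(nid.i))
    elif nid.s is not None:
        return "UA_NODEID_STRING(%s, %s)" % (str(nid.ns), nid.s)
    return ""

print(create_node_id_macro(FakeNodeId(1, i=5001)))        # UA_NODEID_NUMERIC(1, 5001)
print(create_node_id_macro(FakeNodeId(2, s="MyDevice")))  # UA_NODEID_STRING(2, MyDevice)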

View File

@ -20,6 +20,8 @@
###
import logging
logger = logging.getLogger(__name__)
from ua_constants import *
import tempfile
import xml.dom.minidom as dom
@ -31,8 +33,6 @@ import re
from ua_namespace import opcua_node_id_t
logger = logging.getLogger(__name__)
class preProcessDocument:
originXML = '' # Original XML passed to the preprocessor
targetXML = () # tuple of (fileHandle, fileName)
@ -72,14 +72,10 @@ class preProcessDocument:
return None
def extractNamespaceURIs(self):
""" extractNamespaceURIs
minidom gobbles up <NamespaceUris></NamespaceUris> elements, without a decent
""" minidom gobbles up <NamespaceUris></NamespaceUris> elements, without a decent
way to reliably access these dom2 <uri></uri> elements (only xmlns= attributes are
accessible using minidom). We need them for dereferencing though... This
function attempts to do just that.
returns: Nothing
"""
infile = open(self.originXML)
foundURIs = False
@ -105,12 +101,8 @@ class preProcessDocument:
infile.close()
def analyze(self):
""" analyze()
analyze will gather information about the nodes and references contained in a XML File
""" analyze will gather information about the nodes and references contained in a XML File
to facilitate later preprocessing stages that address XML dependency issues
returns: No return value
"""
nodeIds = []
ns = self.nodeset.getElementsByTagName("UANodeSet")
@ -139,9 +131,7 @@ class preProcessDocument:
logger.debug("Nodes: " + str(len(self.containedNodes)) + " References: " + str(len(self.referencedNodes)))
def getNamespaceId(self):
""" namespaceId()
Counts the namespace IDs in all nodes of this XML and picks the most used
""" Counts the namespace IDs in all nodes of this XML and picks the most used
namespace as the numeric identifier of this data model.
returns: Integer ID of the most probable/most used namespace in this XML
@ -160,24 +150,18 @@ class preProcessDocument:
if idDict[entry] > max:
max = idDict[entry]
namespaceIdGuessed = entry
#logger.debug("XML Contents are propably in namespace " + str(entry) + " (used by " + str(idDict[entry]) + " Nodes)")
return namespaceIdGuessed
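The loop above just selects the key of idDict with the highest count. An equivalent one-liner using collections.Counter (a sketch, not the commit's code):

from collections import Counter

# idDict maps a namespace index to the number of nodes that use it.
idDict = Counter({0: 12, 2: 157, 3: 4})
namespaceIdGuessed = idDict.most_common(1)[0][0]   # -> 2, the most used namespace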
def getReferencedNamespaceUri(self, nsId):
""" getReferencedNamespaceUri
returns an URL that hopefully corresponds to the nsId that was used to reference this model
return: URI string corresponding to nsId
"""
""" Returns an URL that hopefully corresponds to the nsId that was used to reference this model """
# Might be the more reliable method: Get the URI from the xmlns attributes (they have numbers)
if len(self.namespaceOrder) > 0:
for el in self.namespaceOrder:
if el[0] == nsId:
return el[1]
# Fallback:
# Some models do not have xmlns:sX attributes, but still <URI>s (usually when they only reference NS0)
# Fallback: Some models do not have xmlns:sX attributes, but still <URI>s
# (usually when they only reference NS0)
if len(self.referencedNamesSpaceUris) > 0 and len(self.referencedNamesSpaceUris) >= nsId-1:
return self.referencedNamesSpaceUris[nsId-1]
@ -201,9 +185,7 @@ class preProcessDocument:
os.close(outfile)
def reassignReferencedNamespaceId(self, currentNsId, newNsId):
""" reassignReferencedNamespaceId
Iterates over all references in this document, find references to currentNsId and changes them to newNsId.
""" Iterates over all references in this document, find references to currentNsId and changes them to newNsId.
NodeIds themselves are not altered.
returns: nothing
@ -215,9 +197,7 @@ class preProcessDocument:
refNd[0].toString()
def reassignNamespaceId(self, currentNsId, newNsId):
""" reassignNamespaceId
Iterates over all nodes in this document, find those in namespace currentNsId and changes them to newNsId.
""" Iterates over all nodes in this document, find those in namespace currentNsId and changes them to newNsId.
returns: nothing
"""
@ -243,18 +223,12 @@ class preProcessDocument:
nd[0].toString()
class open62541_XMLPreprocessor:
preProcDocuments = []
def __init__(self):
self.preProcDocuments = []
def addDocument(self, documentPath):
self.preProcDocuments.append(preProcessDocument(documentPath))
def removePreprocessedFiles(self):
for doc in self.preProcDocuments:
doc.clean()
def getPreProcessedFiles(self):
files = []
for doc in self.preProcDocuments:
@ -263,9 +237,7 @@ class open62541_XMLPreprocessor:
return files
def testModelCongruencyAgainstReferences(self, doc, refs):
""" testModelCongruencyAgainstReferences
Counts how many of the nodes referenced in refs can be found in the model
""" Counts how many of the nodes referenced in refs can be found in the model
doc.
returns: double corresponding to the percentage of hits
@ -373,28 +345,20 @@ class open62541_XMLPreprocessor:
logger.error("Failed to find a match for what " + os.path.basename(doc.originXML) + " refers to as ns=" + str(d))
def preprocessAll(self):
##
## First: Gather statistics about the namespaces:
# Gather statistics about the namespaces:
for doc in self.preProcDocuments:
doc.analyze()
# Preprocess step: Remove XML specific Naming scheme ("uax:")
# FIXME: Not implemented
##
## Preprocess step: Check namespace ID multiplicity and reassign IDs if necessary
##
# Check namespace ID multiplicity and reassign IDs if necessary
self.preprocess_assignUniqueNsIds()
self.preprocess_linkDependantModels()
# Prep step: prevent any XML from using namespace 1 (reserved for instances)
# FIXME: Not implemented
##
## Prep step: prevent any XML from using namespace 1 (reserved for instances)
## FIXME: Not implemented
##
## Final: Write modified XML tmp files
# Final: Write modified XML tmp files
for doc in self.preProcDocuments:
doc.finalize()
return True

View File

@ -25,6 +25,8 @@ import sys
from time import struct_time, strftime, strptime, mktime
from struct import pack as structpack
from collections import deque
import logging
from ua_builtin_types import *;
from ua_node_types import *;
@ -442,16 +444,124 @@ class opcua_namespace():
return tdNodes
def printOpen62541Header(self, printedExternally=[], supressGenerationOfAttribute=[], outfilename="", high_level_api=False):
def printDotGraphWalk(self, depth=1, filename="out.dot", rootNode=None, followInverse = False, excludeNodeIds=[]):
""" Outputs a graphiz/dot description the nodes centered around rootNode.
References beginning from rootNode will be followed for depth steps. If
"followInverse = True" is passed, then inverse (not Forward) references
will also be followed.
Nodes can be excluded from the graph by passing a list of NodeIds as
string representation using excludeNodeIds (ex ["i=53", "ns=2;i=453"]).
Output is written into filename to be parsed by dot/neato/sfdp...
"""
iter = depth
processed = []
if rootNode == None or \
not isinstance(rootNode, opcua_node_t) or \
not rootNode in self.nodes:
root = self.getRoot()
else:
root = rootNode
file=open(filename, 'w+')
if root == None:
return
file.write("digraph ns {\n")
file.write(root.printDot())
refs=[]
if followInverse == True:
refs = root.getReferences(); # + root.getInverseReferences()
else:
for ref in root.getReferences():
if ref.isForward():
refs.append(ref)
while iter > 0:
tmp = []
for ref in refs:
if isinstance(ref.target(), opcua_node_t):
tgt = ref.target()
if not str(tgt.id()) in excludeNodeIds:
if not tgt in processed:
file.write(tgt.printDot())
processed.append(tgt)
if ref.isForward() == False and followInverse == True:
tmp = tmp + tgt.getReferences(); # + tgt.getInverseReferences()
elif ref.isForward() == True :
tmp = tmp + tgt.getReferences();
refs = tmp
iter = iter - 1
file.write("}\n")
file.close()
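printDotGraphWalk writes a Graphviz description of the nodes reachable from rootNode within depth reference hops. A hedged usage sketch, assuming ns is a populated opcua_namespace; the resulting file can then be rendered with the standard Graphviz tools (for example dot -Tpng out.dot -o out.png):

# Plot everything within two forward-reference hops of the nodeset root,
# excluding one node by its string NodeId (the id below is illustrative).
ns.printDotGraphWalk(depth=2,
                     filename="out.dot",
                     rootNode=None,          # None falls back to ns.getRoot()
                     followInverse=False,    # only follow forward references
                     excludeNodeIds=["ns=2;i=453"])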
def getSubTypesOf2(self, node):
re = [node]
for ref in node.getReferences():
if isinstance(ref.target(), opcua_node_t):
if ref.referenceType().displayName() == "HasSubtype" and ref.isForward():
re = re + self.getSubTypesOf2(ref.target())
return re
def reorderNodesMinDependencies(self, printedExternally):
#Kahn's algorithm
#https://algocoding.wordpress.com/2015/04/05/topological-sorting-python/
relevant_types = ["HierarchicalReferences", "HasComponent"]
temp = []
for t in relevant_types:
temp = temp + self.getSubTypesOf2(self.getNodeByBrowseName(t))
relevant_types = temp
in_degree = { u : 0 for u in self.nodes } # determine in-degree
for u in self.nodes: # of each node
if u not in printedExternally:
for ref in u.getReferences():
if isinstance(ref.target(), opcua_node_t):
if(ref.referenceType() in relevant_types and ref.isForward()):
in_degree[ref.target()] += 1
Q = deque() # collect nodes with zero in-degree
for u in in_degree:
if in_degree[u] == 0:
Q.appendleft(u)
L = [] # list for order of nodes
while Q:
u = Q.pop() # choose node of zero in-degree
L.append(u) # and 'remove' it from graph
for ref in u.getReferences():
if isinstance(ref.target(), opcua_node_t):
if(ref.referenceType() in relevant_types and ref.isForward()):
in_degree[ref.target()] -= 1
if in_degree[ref.target()] == 0:
Q.appendleft(ref.target())
if len(L) == len(self.nodes):
self.nodes = L
else: # if there is a cycle,
logger.error("Node graph is circular on the specified references")
self.nodes = L + [x for x in self.nodes if x not in L]
return
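reorderNodesMinDependencies is a direct application of Kahn's algorithm: the in-degree of every node is counted over the hierarchical/HasComponent reference subtypes collected by getSubTypesOf2, nodes whose in-degree has dropped to zero are emitted and 'removed' from the graph, and if a cycle leaves nodes unsorted they are simply appended at the end. A minimal self-contained sketch of the same technique on a plain adjacency-list graph (illustrative data, not the project's node classes):

from collections import deque

def topological_sort(graph):
    """Kahn's algorithm; graph maps each node to the nodes that must come after it."""
    in_degree = {u: 0 for u in graph}
    for u in graph:
        for v in graph[u]:
            in_degree[v] += 1
    queue = deque(u for u in graph if in_degree[u] == 0)
    order = []
    while queue:
        u = queue.popleft()            # take a node with no unmet dependencies
        order.append(u)
        for v in graph[u]:
            in_degree[v] -= 1
            if in_degree[v] == 0:
                queue.append(v)
    if len(order) != len(graph):       # a cycle: append the remainder, as above
        order += [u for u in graph if u not in order]
    return order

# The Objects folder must be instantiated before the components it contains.
print(topological_sort({"Root": ["Objects"], "Objects": ["Device1", "Device2"],
                        "Device1": [], "Device2": []}))
# -> ['Root', 'Objects', 'Device1', 'Device2']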
def printOpen62541Header(self, printedExternally=[], supressGenerationOfAttribute=[], outfilename=""):
unPrintedNodes = []
unPrintedRefs = []
code = []
header = []
# Reorder our nodes to produce a bare minimum of bootstrapping dependencies
logger.debug("Reordering nodes for minimal dependencies during printing.")
self.reorderNodesMinDependencies(printedExternally)
# Some macros (UA_EXPANDEDNODEID_MACRO()...) are easily created, but
# bulky. This class will help to offload some code.
codegen = open62541_MacroHelper(supressGenerationOfAttribute=supressGenerationOfAttribute)
# Populate the unPrinted-Lists with everything we have.
# Every time a node's print function is called, it will pop itself and
# all printed references from these lists.