Diffstat (limited to 'longbow/src/python/site-packages/longbow/NameReport.py')
-rwxr-xr-x  longbow/src/python/site-packages/longbow/NameReport.py  818
1 file changed, 818 insertions, 0 deletions
diff --git a/longbow/src/python/site-packages/longbow/NameReport.py b/longbow/src/python/site-packages/longbow/NameReport.py
new file mode 100755
index 00000000..1d38b4ec
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/NameReport.py
@@ -0,0 +1,818 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import sys
+import os
+import subprocess
+import argparse
+import csv
+import traceback
+
+import LongBow
+from Language_C import Module
+from FileUtil import *  # expected to provide getDarwinTestableFunctions, used below
+from pprint import pprint
+
+class NoObjectFileException(Exception):
+ def __init__(self, value):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+
+# Global error string formatting map
+conformanceErrorFormatMap = {
+ "NAMESPACE_MISMATCH" : "Function signature %s does not have the correct namespace prefix (%s)",
+ "MODULE_MISMATCH" : "Function signature %s does not have the correct module prefix (%s)",
+ "INVALID_STATIC_NAME" : "Local function signature %s must begin with an underscore '_'.",
+ "INVALID_GLOBAL_ENUM_NAME" : "Global enumeration prefix %s must match the module namespace %s.",
+ "INVALID_STATIC_ENUM_NAME" : "Local enumeration prefix %s must begin with an underscore '_'.",
+ "INVALID_GLOBAL_ENUM_VALUE_NAME" : "Global enumeration value prefix %s must match the enum name %s.",
+ "INVALID_GLOBAL_TYPEDEF_NAME" : "Global typedef prefix %s must match the module namespace %s.",
+ "INVALID_STATIC_TYPEDEF_NAME" : "Local typedef prefix %s must begin with an underscore '_'.",
+}
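+
+# Each entry above is a printf-style template; for example,
+#   conformanceErrorFormatMap["NAMESPACE_MISMATCH"] % ('"ccnxFoo"', "parc")
+# yields: Function signature "ccnxFoo" does not have the correct namespace prefix (parc)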
+
+def tuplesListToCSV(points):
+ '''
+ Convert a list of tuples -- data points -- to a list of CSV-formatted strings.
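+
+    Example (illustrative):
+        >>> tuplesListToCSV([("function-names", 95.5), ("enum-names", 100.0)])
+        ['function-names,95.5', 'enum-names,100.0']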
+ '''
+ lines = []
+ for point in points:
+ line = ""
+ for i in range(len(point) - 1):
+ line = line + str(point[i]) + ","
+ line = line + str(point[-1])
+ lines.append(line)
+ return lines
+
+def tuplesListToPrettyText(points, distribution = [99, 90]):
+ '''
+ Convert a list of tuples -- data points -- to a list of colorized strings based
+ on the provided distribution.
+ '''
+ lines = []
+ for point in points:
+ line = ""
+ for i in range(len(point) - 1):
+ line = line + str(point[i]) + " "
+ line = line + str(point[-1])
+ lines.append(LongBow.scoreBuilder(distribution, point[-1], line))
+ return lines
+
+def writeListToStream(llist, fileHandle, appendNewLine = True):
+ '''
+ Write the list of lines to the given file stream handle, appending a new line after
+ every line if told to do so.
+ '''
+ for line in llist:
+ fileHandle.write(line)
+ if appendNewLine:
+ fileHandle.write("\n")
+
+def isValidModuleName(namespace, typeName, moduleName):
+ '''
+ Determine if a given module name is valid (conforming) given the namespace and typename.
+
+ For example, a module in the `parc` namespace with typename `JSON` must be named `parc_JSON`.
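+
+    Example (illustrative):
+        >>> isValidModuleName("parc", "JSON", "parc_JSON")
+        True
+        >>> isValidModuleName("parc", "JSON", "parcJSON")
+        False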
+ '''
+ constructedName = namespace + "_" + typeName
+ return (moduleName == constructedName)
+
+def isValidFunctionName(namespace, modulePrefix, functionName, scope):
+ '''
+    Determine if a given function name is valid (conforming) given the namespace and module prefix.
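+
+    Examples (illustrative; "parc" and "parcJSON" are stand-in namespace and
+    module prefixes):
+        >>> isValidFunctionName("parc", "parcJSON", "parcJSON_Create", "Global")
+        (True, '')
+        >>> isValidFunctionName("parc", "parcJSON", "ccnxFoo", "Global")
+        (False, 'Function signature "ccnxFoo" does not have the correct namespace prefix (parc)')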
+ '''
+ if (scope != "Local"):
+ if(not functionName.startswith(namespace)):
+ return False, conformanceErrorFormatMap["NAMESPACE_MISMATCH"] % ('"' + functionName + '"', namespace)
+ elif (not functionName.startswith(modulePrefix)):
+ return False, conformanceErrorFormatMap["MODULE_MISMATCH"] % ('"' + functionName + '"', modulePrefix)
+ else:
+ return True, ""
+ elif (scope == "Local" and not functionName.startswith("_")):
+ return False, conformanceErrorFormatMap["INVALID_STATIC_NAME"] % ('"' + functionName + '"')
+ else:
+ return True, ""
+
+def isValidTypedefName(typePrefix, name, static):
+ '''
+    Determine if a given typedef name is valid (conforming) given the module's type prefix.
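+
+    Examples (illustrative; "parcJSON" is a stand-in module prefix):
+        >>> isValidTypedefName("parcJSON", "PARCJSONValue", False)
+        (True, '')
+        >>> isValidTypedefName("parcJSON", "_PARCJSONPair", True)
+        (True, '')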
+ '''
+ originalName = name
+ originalTypePrefix = typePrefix
+
+ name = name.lower()
+ typePrefix = typePrefix.lower()
+
+ if ((not static) and name.startswith(typePrefix)) and originalName[0].isupper():
+ return True, ""
+ elif ((not static) and (not name.startswith(typePrefix))):
+ return False, conformanceErrorFormatMap["INVALID_GLOBAL_TYPEDEF_NAME"] % ('"' + originalName + '"', originalTypePrefix)
+ elif (static and name.startswith('_')):
+ return True, ""
+ else:
+ return False, conformanceErrorFormatMap["INVALID_STATIC_TYPEDEF_NAME"] % ('"' + originalName + '"')
+
+def isValidEnumName(typePrefix, name, values, static):
+ '''
+    Determine if a given enumeration name is valid (conforming) given the module's type prefix.
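+
+    Example (illustrative; raw value lines may retain trailing commas):
+        >>> isValidEnumName("parcJSON", "PARCJSONType", ["PARCJSONType_Object,", "PARCJSONType_Array"], False)
+        (True, '')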
+ '''
+ originalName = name
+ originalTypePrefix = typePrefix
+
+ name = name.lower()
+ typePrefix = typePrefix.lower()
+
+ if ((not static) and name.startswith(typePrefix)) and originalName[0].isupper():
+ pass
+ elif ((not static) and (not name.startswith(typePrefix))):
+ return False, conformanceErrorFormatMap["INVALID_GLOBAL_ENUM_NAME"] % ('"' + originalName + '"', originalTypePrefix)
+ elif (static and name.startswith('_')):
+ pass
+ else:
+ return False, conformanceErrorFormatMap["INVALID_STATIC_ENUM_NAME"] % ('"' + originalName + '"')
+
+ for enumVal in values:
+ if ((not static) and not enumVal.startswith(originalName)):
+ return False, conformanceErrorFormatMap["INVALID_GLOBAL_ENUM_VALUE_NAME"] % ('"' + enumVal + '"', originalName)
+ return True, ""
+
+def getTypedefs(path, source):
+ '''
+ Retrieve the names of typedefs (excluding enums) in a given file by parsing the file for
+ typedef specifications. This walks over every line in the file searching for each one,
+    since we cannot extract typedef names nicely using Linux tools.
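+
+    For example, a line `typedef struct parc_json PARCJSON;` yields "PARCJSON",
+    and for the brace form `typedef struct {...} PARCJSON;` the name following
+    the closing brace is extracted.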
+ '''
+ typedefs = []
+ pathToFile = os.path.join(path, source)
+ if os.path.isfile(pathToFile):
+        fin = open(pathToFile, 'r')
+        lines = fin.readlines()
+        fin.close()
+        i = 0 # line counter
+        while (i < len(lines)):
+            line = lines[i].strip()
+            if not line.startswith("//"):
+                if "typedef" in line and "enum" not in line: # go to the end of the typedef
+                    if "{" in line:
+                        while "}" not in line and i + 1 < len(lines):
+                            i = i + 1
+                            line = lines[i].strip()
+                        name = line.replace("}","").replace(";","").strip()
+                        typedefs.append(name)
+                    else:
+                        splits = line.split(" ")
+                        if len(splits) > 3 and "struct" in splits[1]:
+                            name = splits[3].replace(";","")
+                            typedefs.append(name.strip())
+            i = i + 1
+ return typedefs
+
+def getEnumerations(path, source):
+ '''
+ Retrieve the names of enumerations in a given file by parsing the file for enum specifications.
+ This walks over every line in the file searching for enums, since we cannot extract enum names
+    nicely using Linux tools.
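+
+    For example, a `typedef enum {...} PARCJSONType;` block yields the tuple
+    ("PARCJSONType", values), where values holds the raw enumerator lines
+    (trailing commas included).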
+ '''
+ enums = []
+ pathToFile = os.path.join(path, source)
+ if os.path.isfile(pathToFile):
+        fin = open(pathToFile, 'r')
+        lines = fin.readlines()
+        fin.close()
+        i = 0 # line counter
+        while (i < len(lines)):
+            line = lines[i].strip()
+            if not line.startswith("//"):
+                values = []
+                if "typedef enum" in line: # go to the end of the enumeration
+                    while (i + 1 < len(lines) and line.find("}") < 0):
+                        i = i + 1
+                        line = lines[i].strip()
+                        values.append(line) # append each raw value line
+                    if (line.find("}") >= 0):
+                        name = line.replace("}","").replace(";","")
+                        if values:
+                            values.pop() # drop the closing-brace line itself
+                        enums.append((name.strip(), values))
+            i = i + 1
+ return enums
+
+def getTypedefsFromFiles(fileInfoList):
+ '''
+ Get the typedefs from each file in the fileInfoList.
+
+ Each element in fileInfoList is a tuple of the form (path, filename, staticTag),
+ where staticTag is a flag used to indicate if the typedefs in said file should
+ be treated as static.
+ '''
+ allTypedefs = []
+ for (path, fileName, staticTag) in fileInfoList:
+ typedefs = getTypedefs(path, fileName)
+ for typedef in typedefs:
+ allTypedefs.append((typedef, staticTag))
+ return allTypedefs
+
+def getEnumerationsFromFiles(fileInfoList):
+ '''
+ Get the enums from each file in the fileInfoList.
+
+ Each element in fileInfoList is a tuple of the form (path, filename, staticTag),
+ where staticTag is a flag used to indicate if the enums in said file should
+ be treated as static.
+ '''
+ allEnums = []
+ for (path, fileName, staticTag) in fileInfoList:
+ enums = getEnumerations(path, fileName)
+ for (enumName, values) in enums:
+ allEnums.append((enumName, values, staticTag))
+ return allEnums
+
+class FunctionConformanceContainer():
+ def __init__(self, module):
+ self.path = module.getModulePath()
+ self.module = module
+ self.failedFunctions = []
+ self.passedFunctions = []
+
+ if (len(self.path) > 0):
+ self.fullPath = self.path + os.sep + module.getCSourceName()
+ else:
+ self.fullPath = module.getCSourceName()
+
+ self.computeConformance()
+
+ def computeConformance(self):
+ functionDictionary = {}
+ objectFileName = self.module.getPathToObjectFile()
+ sourceFileName = os.path.join(self.path, self.module.getCSourceName())
+ temp = objectFileName
+ if '.a' in temp: # If this is a path into an archive
+ temp = temp.split('(')[0]
+ if not os.path.isfile(temp):
+            raise NoObjectFileException("You must compile " + str(sourceFileName) + " to generate a corresponding object file or provide a special object file path")
+
+ try:
+ functionDictionary = getDarwinTestableFunctions(objectFileName)
+        except Exception:
+            raise Exception("You must compile " + str(sourceFileName) + " to generate a corresponding object file or provide a special object file path")
+
+ namespace = self.module.getNamespace()
+ modulePrefix = self.module.getModulePrefix()
+
+ # Find all passing/failing functions
+ if (namespace != None and modulePrefix != None):
+ for scope in functionDictionary:
+ for functionName in functionDictionary[scope]:
+ isValid, reason = isValidFunctionName(namespace, modulePrefix, functionName, scope)
+ if isValid:
+ self.addPassedFunction(functionName)
+ else:
+ self.addFailedFunction(functionName, reason)
+
+    def containsMainFunction(self):
+        # A name equal to "main" also starts with "main", so one check suffices.
+        for (functionName, reason) in self.failedFunctions:
+            if functionName.strip().startswith("main"):
+                return True
+        for functionName in self.passedFunctions:
+            if functionName.strip().startswith("main"):
+                return True
+        return False
+
+ @staticmethod
+ def getType():
+ return "function-names"
+
+ def addFailedFunction(self, function, reason):
+ self.failedFunctions.append((function, reason))
+
+ def getFailedFunctions(self):
+ return self.failedFunctions
+
+ def addPassedFunction(self, function):
+ self.passedFunctions.append(function)
+
+ def getPassedFunctions(self):
+ return self.passedFunctions
+
+ def analyzeConformance(self):
+ '''
+ Convert the raw pass/fail function results into a set of individual
+ data points (finegrain results) and overall percentage.
+ '''
+ numPassed = len(self.passedFunctions)
+ numFailed = len(self.failedFunctions)
+
+ self.percentage = 100.0
+ self.points = []
+
+ if (numPassed + numFailed > 0): # skip invalid entries
+ self.percentage = float(float(numPassed) / float(numFailed + numPassed)) * 100.0
+
+ # Data point schema:
+        #   topic, name[, failure reason], score
+ for fname in self.passedFunctions:
+ data = ["function-name", fname, 100.0]
+ self.points.append(data)
+
+ for (fname, reason) in self.failedFunctions:
+ data = ["function-name", fname, reason, 0.0]
+ self.points.append(data)
+
+ def getNumberOfPassed(self):
+ return len(self.passedFunctions)
+
+ def getNumberOfFailed(self):
+ return len(self.failedFunctions)
+
+ def totalCSV(self):
+ return tuplesListToCSV(map(lambda point : [self.fullPath] + point, self.points))
+
+ def totalText(self, distribution):
+ formattedLines = tuplesListToPrettyText(self.points, distribution)
+ return map(lambda formattedLine : self.fullPath + " " + formattedLine, formattedLines)
+
+ def summaryCSV(self):
+ line = [(self.getType(), self.percentage)]
+ return tuplesListToCSV(line)
+
+ def summaryText(self, distribution):
+ line = [(self.getType(), self.percentage)]
+ return tuplesListToPrettyText(line, distribution)
+
+ def getScore(self):
+ return (self.getType(), self.percentage)
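+
+# Typical use of FunctionConformanceContainer (a sketch; the file name and
+# object directory below are hypothetical, and the module's object file must
+# already be compiled, since computeConformance() requires it):
+#
+#   container = FunctionConformanceContainer(Module("parc_JSON.c", ["./obj"]))
+#   container.analyzeConformance()
+#   writeListToStream(container.summaryText([99, 90]), sys.stdout)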
+
+
+class EnumConformanceContainer():
+ def __init__(self, module):
+ self.path = module.getModulePath()
+ self.module = module
+ self.failedEnums = []
+ self.passedEnums = []
+
+ if (len(self.path) > 0):
+ self.fullPath = self.path + os.sep + module.getCSourceName()
+ else:
+ self.fullPath = module.getCSourceName()
+
+ self.computeConformance()
+
+ def computeConformance(self):
+ sourceFileName = self.module.getCSourceName()
+ headerFileName = self.module.getCHeaderName()
+
+ enums = getEnumerationsFromFiles([(self.path, sourceFileName, True), (self.path, headerFileName, False)])
+ modulePrefix = self.module.getModulePrefix()
+ if (modulePrefix != None):
+ for (enumName, values, staticTag) in enums:
+ isValid, reason = isValidEnumName(modulePrefix, enumName, values, staticTag)
+ if isValid:
+ self.addPassedEnum(enumName)
+ else:
+ self.addFailedEnum(enumName, reason)
+
+ @staticmethod
+ def getType():
+ return "enum-names"
+
+ def addFailedEnum(self, enum, reason):
+ self.failedEnums.append((enum, reason))
+
+ def getFailedEnums(self):
+ return self.failedEnums
+
+ def addPassedEnum(self, enum):
+ self.passedEnums.append(enum)
+
+ def getPassedEnums(self):
+ return self.passedEnums
+
+ def analyzeConformance(self):
+ '''
+ Convert the raw pass/fail enum results into a set of individual
+ data points (finegrain results) and overall percentage.
+ '''
+ self.enumPercentage = 100.0
+ self.points = []
+ numPassed = len(self.passedEnums)
+ numFailed = len(self.failedEnums)
+
+ if (numPassed + numFailed > 0):
+ self.enumPercentage = float((float(numPassed) / float(numPassed + numFailed)) * 100)
+
+ for ename in self.passedEnums:
+ data = ["enum-name", ename, 100.0]
+ self.points.append(data)
+
+ for (ename, reason) in self.failedEnums:
+ data = ["enum-name", ename, reason, 0.0]
+ self.points.append(data)
+
+ def getNumberOfPassed(self):
+ return len(self.passedEnums)
+
+ def getNumberOfFailed(self):
+ return len(self.failedEnums)
+
+ def totalCSV(self):
+ return tuplesListToCSV(map(lambda point : [self.fullPath] + point, self.points))
+
+ def totalText(self, distribution):
+ formattedLines = tuplesListToPrettyText(self.points, distribution)
+ return map(lambda formattedLine : self.fullPath + " " + formattedLine, formattedLines)
+
+ def summaryCSV(self):
+ line = [(self.getType(), self.enumPercentage)]
+ return tuplesListToCSV(line)
+
+ def summaryText(self, distribution):
+ line = [(self.getType(), self.enumPercentage)]
+ return tuplesListToPrettyText(line, distribution)
+
+ def getScore(self):
+ return (self.getType(), self.enumPercentage)
+
+class TypedefConformanceContainer():
+ def __init__(self, module):
+ self.path = module.getModulePath()
+ self.module = module
+ self.failedTypedefs = []
+ self.passedTypedefs = []
+
+ if (len(self.path) > 0):
+ self.fullPath = self.path + os.sep + module.getCSourceName()
+ else:
+ self.fullPath = module.getCSourceName()
+
+ self.computeConformance()
+
+ def computeConformance(self):
+ sourceFileName = self.module.getCSourceName()
+ headerFileName = self.module.getCHeaderName()
+
+ typedefs = getTypedefsFromFiles([(self.path, sourceFileName, True), (self.path, headerFileName, False)])
+
+ modulePrefix = self.module.getModulePrefix()
+ if (modulePrefix != None):
+ for (typedefName, staticTag) in typedefs:
+ isValid, reason = isValidTypedefName(modulePrefix, typedefName, staticTag)
+ if isValid:
+ self.addPassedTypedef(typedefName)
+ else:
+ self.addFailedTypedef(typedefName, reason)
+
+ @staticmethod
+ def getType():
+ return "typedef-names"
+
+ def addFailedTypedef(self, typedef, reason):
+ self.failedTypedefs.append((typedef, reason))
+
+ def getFailedTypedefs(self):
+ return self.failedTypedefs
+
+ def addPassedTypedef(self, typedef):
+ self.passedTypedefs.append(typedef)
+
+ def getPassedTypedefs(self):
+ return self.passedTypedefs
+
+ def analyzeConformance(self):
+ '''
+ Convert the raw pass/fail typedef results into a set of individual
+ data points (finegrain results) and overall percentage.
+ '''
+ self.points = []
+ self.typedefPercentage = 100.0
+ numPassed = len(self.passedTypedefs)
+ numFailed = len(self.failedTypedefs)
+ if (numPassed + numFailed > 0):
+ self.typedefPercentage = float(float(numPassed) / float(numFailed + numPassed)) * 100
+
+ for tName in self.passedTypedefs:
+ data = ["typedef-name", tName, 100.0]
+ self.points.append(data)
+
+ for (tName, reason) in self.failedTypedefs:
+ data = ["typedef-name", tName, reason, 0.0]
+ self.points.append(data)
+
+ def getNumberOfPassed(self):
+ return len(self.passedTypedefs)
+
+ def getNumberOfFailed(self):
+ return len(self.failedTypedefs)
+
+ def totalCSV(self):
+ return tuplesListToCSV(map(lambda point : [self.fullPath] + point, self.points))
+
+ def totalText(self, distribution):
+ formattedLines = tuplesListToPrettyText(self.points, distribution)
+ return map(lambda formattedLine : self.fullPath + " " + formattedLine, formattedLines)
+
+ def summaryCSV(self):
+ line = [(self.getType(), self.typedefPercentage)]
+ return tuplesListToCSV(line)
+
+ def summaryText(self, distribution):
+ line = [(self.getType(), self.typedefPercentage)]
+ return tuplesListToPrettyText(line, distribution)
+
+ def getScore(self):
+ return (self.getType(), self.typedefPercentage)
+
+class ModuleConformanceContainer():
+ '''
+    This conformance container stores a collection of individual type-naming
+    conformance results within a particular module, and uses the information
+    contained therein to provide both finegrain and summarized conformance
+    results for each type.
+ '''
+
+ def __init__(self, module):
+ self.conformanceContainers = []
+ self.path = module.getModulePath()
+ self.module = module
+ self.validName = False
+ self.process = True
+
+ if (len(self.path) > 0):
+ self.fullPath = self.path + os.sep + module.getCSourceName()
+ else:
+ self.fullPath = module.getCSourceName()
+
+ def setProcess(self, value):
+ self.process = value
+
+ def processModule(self):
+ return self.process
+
+ def addConformanceContainer(self, complianceContainer):
+ self.conformanceContainers.append(complianceContainer)
+
+ def analyzeConformance(self):
+ for container in self.conformanceContainers:
+ container.analyzeConformance()
+
+ def getNumberOfPassed(self):
+ tuples = []
+ for container in self.conformanceContainers:
+ tuples.append((container.getType(), container.getNumberOfPassed()))
+ return tuples # list of (topic, # of passed)
+
+ def getNumberOfFailed(self):
+ tuples = []
+ for container in self.conformanceContainers:
+ tuples.append((container.getType(), container.getNumberOfFailed()))
+ return tuples # list of (topic, # of failed)
+
+ def totalCSV(self):
+ csvTuples = []
+ for container in self.conformanceContainers:
+ csvTuples = csvTuples + container.totalCSV()
+ return csvTuples
+
+ def totalText(self, distribution):
+ textTuples = []
+ for container in self.conformanceContainers:
+ textTuples = textTuples + container.totalText(distribution)
+ return textTuples
+
+ def summaryCSV(self):
+ singleTuple = [self.fullPath]
+ for container in self.conformanceContainers:
+ csvGroup = container.summaryCSV()
+ singleTuple = singleTuple + [csvGroup[-1]]
+ return tuplesListToCSV([tuple(singleTuple)])
+
+ def summaryText(self, distribution, divider=' '):
+ formattedLine = self.fullPath
+ for container in self.conformanceContainers:
+ lineGroup = container.summaryText(distribution)[0].split(" ")
+ formattedLine = formattedLine + divider + lineGroup[-2] + ' ' + lineGroup[-1]
+ return [formattedLine]
+
+ def getScores(self):
+ scores = {}
+ for container in self.conformanceContainers:
+ scoreKey, scoreVal = container.getScore()
+ scores[scoreKey] = scoreVal
+ return scores
+
+class ModuleSetConformanceContainer():
+ '''
+ This conformance container stores a collection of individual module naming
+ conformance results, and uses the information contained therein to provide
+ summaries of conformance results.
+ '''
+
+ def __init__(self):
+ self.conformanceList = []
+
+ def addConformanceContainer(self, container):
+ self.conformanceList.append(container)
+
+ def analyzeConformance(self):
+ passed = {} # passed type-number bucket
+ failed = {} # failed type-number bucket
+
+ for container in self.conformanceList:
+ passedSet = container.getNumberOfPassed()
+ for (conformanceType, number) in passedSet:
+ if (conformanceType in passed):
+ passed[conformanceType] = passed[conformanceType] + number
+ else:
+ passed[conformanceType] = number
+ failedSet = container.getNumberOfFailed()
+ for (conformanceType, number) in failedSet:
+ if (conformanceType in failed):
+ failed[conformanceType] = failed[conformanceType] + number
+ else:
+ failed[conformanceType] = number
+
+ self.typeConformancePercentages = {}
+ for conformanceType in passed:
+ total = passed[conformanceType] + failed[conformanceType]
+ percentage = 100.0
+ if (total > 0):
+ percentage = (float(passed[conformanceType]) / float(total)) * 100.0
+ self.typeConformancePercentages[conformanceType] = percentage
+
+ def summaryCSV(self):
+ collatedTuple = ["average-scores"]
+ for conformanceType in self.typeConformancePercentages:
+ collatedTuple.append(conformanceType) # append type
+ collatedTuple.append(self.typeConformancePercentages[conformanceType]) # append percentage
+ return tuplesListToCSV([tuple(collatedTuple)])
+
+ def summaryText(self, distribution):
+ formattedLine = "average-scores"
+ for conformanceType in self.typeConformancePercentages:
+ prettyTypeText = tuplesListToPrettyText([(conformanceType, self.typeConformancePercentages[conformanceType])])[0]
+ formattedLine = formattedLine + " " + prettyTypeText
+ return [formattedLine]
+
+def computeModuleNameConformance(module):
+ '''
+ Compute the module name conformance. There is no container for this result
+ since it's a simple boolean.
+ '''
+ namespace = module.getNamespace()
+ moduleName = module.getModuleName()
+ typeName = module.getTypeName()
+
+ if (namespace != None and moduleName != None and typeName != None):
+ return isValidModuleName(namespace, typeName, moduleName)
+ else:
+ return False
+
+def computeModuleConformance(module):
+ '''
+ Compute the naming conformance results for an entire module,
+ which includes conformance results for all types contained therein.
+ '''
+ moduleContainer = ModuleConformanceContainer(module)
+
+    # Get the compliance results for functions, noting whether a main function was seen
+ functionContainer = FunctionConformanceContainer(module)
+ moduleContainer.addConformanceContainer(functionContainer)
+ moduleContainer.setProcess(not functionContainer.containsMainFunction())
+
+ # Now handle enums, typedefs, etc.
+ moduleContainer.addConformanceContainer(EnumConformanceContainer(module))
+ moduleContainer.addConformanceContainer(TypedefConformanceContainer(module))
+    moduleContainer.validName = computeModuleNameConformance(module)
+
+ # Now that we have the data, run over it to generate the results
+ moduleContainer.analyzeConformance()
+
+ return moduleContainer
+
+def getConformanceHeaders():
+ headers = [FunctionConformanceContainer.getType(), EnumConformanceContainer.getType(), TypedefConformanceContainer.getType()]
+ return headers
+
+def gradeAndPrint(targets, objectDirs, problemsOnly=False, printPrefix=""):
+ if len(targets) < 1:
+ print "No Files To Grade"
+ return
+
+ distribution = [99, 90]
+ maxFileNameLength = max(max(map(lambda target: len(target), targets)), len("File Name"))
+
+ moduleConformanceSet = ModuleSetConformanceContainer()
+ headers = getConformanceHeaders()
+ pformat = '{prefix}{:<{maxFileNameLength}}'
+ nformat = pformat
+ for header in headers:
+ nformat = nformat + '{:>15}'
+ print nformat.format('File Name', *headers, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
+
+
+ for target in targets:
+ module = Module(target, objectDirs)
+ if module.isTestSourceName():
+ continue
+ fileNamePrefix = module.getModuleName()
+ path = module.getModulePath()
+ try:
+ moduleConformance = computeModuleConformance(module)
+ if not moduleConformance.processModule():
+ pass
+ else:
+ moduleConformanceSet.addConformanceContainer(moduleConformance)
+ scores = moduleConformance.getScores()
+ minScore = 100.0
+ for key in scores:
+ score = scores[key]
+ if score < minScore:
+ minScore = score
+ scores[key] = '%3.1f'%score
+ if problemsOnly and minScore == 100.0:
+ continue
+ printVals=[]
+ for hval in headers:
+ score = 'N/A'
+ if hval in scores:
+ score = scores[hval]
+ printVals.append(score)
+ line = nformat.format(target, *printVals, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
+ LongBow.scorePrinter(distribution, minScore, line)
+        except NoObjectFileException:
+            eformat = pformat + " Could Not Grade: No .o file found for this file"
+            line = eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
+            print LongBow.buildRed(line)
+        except Exception as e:
+            eformat = pformat + " Could Not Grade: {msg}"
+            line = eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
+            print LongBow.buildRed(line)
+ moduleConformanceSet.analyzeConformance()
+
+
+def commandLineMain(args, targets, objectDir):
+ distribution = eval(args.distribution)
+ moduleConformanceSet = ModuleSetConformanceContainer()
+
+ summary = args.summary
+ average = args.average
+ finegrain = args.finegrain
+ if not (summary or average or finegrain):
+ summary = True
+
+ objectDirs = [objectDir]
+ for i in range(len(targets)):
+ module = Module(targets[i], objectDirs)
+ prefix = module.getModuleName()
+ path = module.getModulePath()
+
+ tb = None
+ try:
+ moduleConformance = computeModuleConformance(module)
+ if not moduleConformance.processModule():
+ print >> sys.stderr, "Skipping module " + str(prefix) + ": contains a `main` function"
+ else:
+ moduleConformanceSet.addConformanceContainer(moduleConformance)
+
+ if summary:
+ if args.output == "text":
+ writeListToStream(moduleConformance.summaryText(distribution), sys.stdout)
+ else:
+ writeListToStream(moduleConformance.summaryCSV(), sys.stdout)
+
+ if finegrain:
+ if args.output == "text":
+ writeListToStream(moduleConformance.totalText(distribution), sys.stdout)
+ else:
+ writeListToStream(moduleConformance.totalCSV(), sys.stdout)
+
+ except Exception as e:
+ tb = traceback.format_exc()
+ print >> sys.stderr, "Error: can't analyze conformance of " + os.path.join(path, prefix) + ": " + str(e)
+ finally:
+ if tb != None and args.trace:
+ print tb
+
+ moduleConformanceSet.analyzeConformance()
+ if average:
+ if args.output == "text":
+ writeListToStream(moduleConformanceSet.summaryText(distribution), sys.stdout)
+ else:
+ writeListToStream(moduleConformanceSet.summaryCSV(), sys.stdout)
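+
+# A minimal driver for commandLineMain might look like the sketch below. The
+# flag names mirror the attributes read above (summary, average, finegrain,
+# distribution, output, trace); the short options and defaults are assumptions,
+# not the actual LongBow command-line interface.
+#
+#   if __name__ == '__main__':
+#       parser = argparse.ArgumentParser(prog='longbow-name-report')
+#       parser.add_argument('-s', '--summary', action='store_true')
+#       parser.add_argument('-a', '--average', action='store_true')
+#       parser.add_argument('-f', '--finegrain', action='store_true')
+#       parser.add_argument('-t', '--trace', action='store_true')
+#       parser.add_argument('-d', '--distribution', default='[99, 90]')
+#       parser.add_argument('-o', '--output', default='text', choices=['text', 'csv'])
+#       parser.add_argument('objectDir', help='directory containing compiled .o files')
+#       parser.add_argument('files', nargs='+')
+#       args = parser.parse_args()
+#       commandLineMain(args, args.files, args.objectDir)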