aboutsummaryrefslogtreecommitdiffstats
path: root/longbow/src/python
diff options
context:
space:
mode:
authorMichele Papalini <micpapal+fdio@cisco.com>2017-02-24 08:00:33 +0000
committerGerrit Code Review <gerrit@fd.io>2017-02-24 08:00:33 +0000
commit4df7f4cc98b6288177df256e1db70ddc3f7d00db (patch)
tree55e71277b419e4830ae641868ab8e751c8b86972 /longbow/src/python
parentf28308bd99381ef5f1e178e2e1f870f245e35873 (diff)
parentec688b4723a041044226358bcd4dd6e2da39da49 (diff)
Merge "Initial commit: cframework. Longbow and Libparc" into cframework/master
Diffstat (limited to 'longbow/src/python')
-rw-r--r--longbow/src/python/.gitignore3
-rw-r--r--longbow/src/python/.project18
-rw-r--r--longbow/src/python/.pydevproject7
-rw-r--r--longbow/src/python/CMakeLists.txt29
-rwxr-xr-xlongbow/src/python/longbow-ansigcov.py85
-rwxr-xr-xlongbow/src/python/longbow-bytearray.py54
-rwxr-xr-xlongbow/src/python/longbow-code.py208
-rwxr-xr-xlongbow/src/python/longbow-complexity-report.py213
-rwxr-xr-xlongbow/src/python/longbow-coverage-report.py87
-rwxr-xr-xlongbow/src/python/longbow-doxygen-report.py166
-rwxr-xr-xlongbow/src/python/longbow-generate-about.py289
-rwxr-xr-xlongbow/src/python/longbow-name-report.py91
-rwxr-xr-xlongbow/src/python/longbow-preprocess.py153
-rwxr-xr-xlongbow/src/python/longbow-size-report.py131
-rwxr-xr-xlongbow/src/python/longbow-style-report.py99
-rwxr-xr-xlongbow/src/python/longbow-test-run.py172
-rwxr-xr-xlongbow/src/python/longbow-test-suite.py55
-rwxr-xr-xlongbow/src/python/longbow-vocabulary-report.py66
-rwxr-xr-xlongbow/src/python/parc_uncrustify.cfg115
-rw-r--r--longbow/src/python/site-packages/CMakeLists.txt12
-rwxr-xr-xlongbow/src/python/site-packages/longbow.pth18
-rw-r--r--longbow/src/python/site-packages/longbow/.gitignore1
-rwxr-xr-xlongbow/src/python/site-packages/longbow/ANSITerm.py62
-rwxr-xr-xlongbow/src/python/site-packages/longbow/CoverageReport.py262
-rwxr-xr-xlongbow/src/python/site-packages/longbow/DoxygenReport.py161
-rwxr-xr-xlongbow/src/python/site-packages/longbow/FileUtil.py102
-rwxr-xr-xlongbow/src/python/site-packages/longbow/GCov.py232
-rwxr-xr-xlongbow/src/python/site-packages/longbow/GCovSummary.py42
-rwxr-xr-xlongbow/src/python/site-packages/longbow/Language_C.py202
-rwxr-xr-xlongbow/src/python/site-packages/longbow/LongBow.py96
-rwxr-xr-xlongbow/src/python/site-packages/longbow/NameReport.py818
-rwxr-xr-xlongbow/src/python/site-packages/longbow/StyleReport.py382
-rwxr-xr-xlongbow/src/python/site-packages/longbow/SymbolTable.py87
-rwxr-xr-xlongbow/src/python/site-packages/longbow/VocabularyReport.py162
34 files changed, 4680 insertions, 0 deletions
diff --git a/longbow/src/python/.gitignore b/longbow/src/python/.gitignore
new file mode 100644
index 00000000..34921562
--- /dev/null
+++ b/longbow/src/python/.gitignore
@@ -0,0 +1,3 @@
+longbow-generate-about
+longbow-code
+longbow-preprocess
diff --git a/longbow/src/python/.project b/longbow/src/python/.project
new file mode 100644
index 00000000..fd327b61
--- /dev/null
+++ b/longbow/src/python/.project
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+ <name>LongBow-Python</name>
+ <comment></comment>
+ <projects>
+ <project>Longbow</project>
+ </projects>
+ <buildSpec>
+ <buildCommand>
+ <name>org.python.pydev.PyDevBuilder</name>
+ <arguments>
+ </arguments>
+ </buildCommand>
+ </buildSpec>
+ <natures>
+ <nature>org.python.pydev.pythonNature</nature>
+ </natures>
+</projectDescription>
diff --git a/longbow/src/python/.pydevproject b/longbow/src/python/.pydevproject
new file mode 100644
index 00000000..f4a65975
--- /dev/null
+++ b/longbow/src/python/.pydevproject
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?>
+
+<pydev_project>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+</pydev_project>
diff --git a/longbow/src/python/CMakeLists.txt b/longbow/src/python/CMakeLists.txt
new file mode 100644
index 00000000..157cda21
--- /dev/null
+++ b/longbow/src/python/CMakeLists.txt
@@ -0,0 +1,29 @@
+add_subdirectory(site-packages)
+
+install( FILES parc_uncrustify.cfg DESTINATION ${CMAKE_INSTALL_PREFIX}/etc )
+
+macro(AddLongBowPythonScript scriptFile)
+ configure_file(${ARGV0}.py ${ARGV0} @ONLY)
+ install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/${ARGV0} DESTINATION ${CMAKE_INSTALL_PREFIX}/bin)
+endmacro(AddLongBowPythonScript)
+
+set(ScriptList
+ longbow-doxygen-report
+ longbow-generate-about
+ longbow-preprocess
+ longbow-code
+ longbow-complexity-report
+ longbow-coverage-report
+ longbow-bytearray
+ longbow-ansigcov
+ longbow-name-report
+ longbow-size-report
+ longbow-style-report
+ longbow-test-run
+ longbow-test-suite
+ longbow-vocabulary-report
+ )
+
+foreach(script ${ScriptList})
+ AddLongBowPythonScript(${script})
+endforeach()
diff --git a/longbow/src/python/longbow-ansigcov.py b/longbow/src/python/longbow-ansigcov.py
new file mode 100755
index 00000000..c677ab2c
--- /dev/null
+++ b/longbow/src/python/longbow-ansigcov.py
@@ -0,0 +1,85 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import sys
+import os
+import subprocess
+'''
+This programme takes a gcov coverage report file as input and prints it to standard
+output with ANSI colour highlighting: unexecuted lines (#####) red, exceptional lines ($$$$$) yellow, executed lines green.
+'''
+
+ansiRed = "\x1b[31m";
+ansiGreen = "\x1b[32m";
+ansiYellow = "\x1b[33;1m";
+ansiOrange = "\x1b[33m";
+ansiReset = "\x1b[0m";
+
+
+def ANSITerminal_printchars(color, chars):
+ if color == "red":
+ return ansiRed + chars + ansiReset
+ if color == "green":
+ return ansiGreen + chars + ansiReset
+ if color == "yellow":
+ return ansiYellow + chars + ansiReset
+ if color == "orange":
+ return ansiOrange + chars + ansiReset
+ return chars
+
+
+class LongBowAnsi:
+ def __init__(self, input=sys.stdin):
+ self.input = input
+ return
+
+ def tokenise(self, line):
+ fields = line.split(":", 2)
+ fields[0] = fields[0].strip()
+ return fields
+
+ def colourise(self):
+ lines = self.input.read().splitlines()
+ for line in lines:
+ fields = self.tokenise(line)
+ if len(fields) == 3:
+ if fields[0] == "#####":
+ print ANSITerminal_printchars("red", fields[1]), ANSITerminal_printchars("red", fields[2])
+ pass
+ elif fields[0] == "$$$$$":
+ print ANSITerminal_printchars("yellow", fields[1]), ANSITerminal_printchars("yellow", fields[2])
+ pass
+ else:
+ print ANSITerminal_printchars("green", fields[1]), ANSITerminal_printchars("green", fields[2])
+ pass
+ pass
+ pass
+ return
+
+
+if __name__ == '__main__':
+ outputFileName = None
+
+ if len(sys.argv) != 2:
+ print "Usage: longbow-ansigov.py file.gcov"
+ sys.exit(1)
+
+ with open(sys.argv[1], 'r') as f:
+ longBowAnsi = LongBowAnsi(f)
+ longBowAnsi.colourise()
+ f.close()
+
+ pass
diff --git a/longbow/src/python/longbow-bytearray.py b/longbow/src/python/longbow-bytearray.py
new file mode 100755
index 00000000..30adfda3
--- /dev/null
+++ b/longbow/src/python/longbow-bytearray.py
@@ -0,0 +1,54 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import sys
+import os
+
+def indent(count):
+ for i in range(0, count/2):
+ print " ",
+ return
+
+def print_interstitialspace(index):
+ if ((index + 1) % 8) == 0:
+ print " ",
+ if ((index + 1) % 16) == 0:
+ print ""
+ indent(4)
+ pass
+
+def printarray(array):
+ i = 0
+ for c in array:
+ print "0x%02x," % (c),
+ print_interstitialspace(i)
+ i = i + 1
+ pass
+
+ return
+
+if __name__ == '__main__':
+ with open(sys.argv[1], 'r') as f:
+
+ bytes = bytearray(f.read())
+ print len(bytes)
+ print "unsigned char bytes[] = {"
+ indent(4)
+ printarray(bytes)
+ print "\n};";
+ pass
+
+ f.close()
diff --git a/longbow/src/python/longbow-code.py b/longbow/src/python/longbow-code.py
new file mode 100755
index 00000000..8d5a72b6
--- /dev/null
+++ b/longbow/src/python/longbow-code.py
@@ -0,0 +1,208 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import sys
+import os
+import subprocess
+'''
+This programme takes a previously generated LongBow production (see longbow-preprocess.py) as input
+and generates corresponding C code as a template for a complete test runner for that production.
+'''
+
+def sourceFileNameToShortName(sourceFileName):
+ '''
+ Given a path to a source file, return the name without any path components or suffix after the first '.' (inclusive).
+ '''
+ name = os.path.basename(sourceFileName)
+ return name.split(".")[0]
+
+def canonicalizeFunctionName(functionName):
+ '''
+ Given a function name that contains the initial '_' character,
+ strip it and return a canonicalised form of the same name suitable for invoking from a C source file.
+
+ This used to translate the typical function name mangling by the C compiler,
+ where foo() becomes _foo in the object file symbol table.
+ '''
+ if functionName[0] == "_":
+ functionName = functionName[1:]
+ return functionName
+
+class LongBowTestGenerator:
+ def __init__(self, output=sys.stdout):
+ self.output = output
+ return
+
+ def generateCode(self, testProduction):
+ testRunnerName = testProduction["name"]
+ sourceFileName = testProduction["files"]["sourceFile"]
+ objectFileName = testProduction["files"]["objectFile"]
+
+ self.filePrologue()
+ self.testRunnerPrologue(sourceFileName, objectFileName, testRunnerName, testProduction["testFixtures"])
+
+ for testFixture in testProduction["testFixtures"]:
+ fixtures = self.generateTestFixture(testProduction, testFixture)
+ pass
+
+ self.testRunnerEpilogue(sourceFileName, objectFileName, testRunnerName, testProduction["testFixtures"])
+ return
+
+ def filePrologue(self):
+ self.output.write("/*\n")
+ self.output.write(" *\n")
+ self.output.write(" */\n")
+ self.output.write("\n")
+ return
+
+ def testRunnerPrologue(self, sourceFileName, objectFileName, testRunnerName, testFixtures):
+ self.output.write("// Include the file(s) containing the functions to be tested.\n")
+ self.output.write("// This permits internal static functions to be visible to this Test Runner.\n")
+ self.output.write("#include \"%s\"\n" % (sourceFileName))
+ self.output.write("\n")
+ self.output.write("#include <LongBow/unit-test.h>\n")
+ self.output.write("\n")
+ self.output.write("LONGBOW_TEST_RUNNER(%s)\n" % (testRunnerName))
+ self.output.write("{\n")
+ self.output.write(" // The following Test Fixtures will run their corresponding Test Cases.\n")
+ self.output.write(" // Test Fixtures are run in the order specified here, but every test must be idempotent.\n")
+ self.output.write(" // Never rely on the execution order of tests or share state between them.\n")
+ for testFixture in testFixtures:
+ self.output.write(" LONGBOW_RUN_TEST_FIXTURE(%s);\n" % (testFixture["name"]))
+ pass
+ self.output.write("}\n")
+ self.output.write("\n" )
+ self.output.write("// The Test Runner calls this function once before any Test Fixtures are run.\n")
+ self.output.write("LONGBOW_TEST_RUNNER_SETUP(%s)\n" % (testRunnerName))
+ self.output.write("{\n")
+ self.output.write(" return LONGBOW_STATUS_SUCCEEDED;\n")
+ self.output.write("}\n")
+ self.output.write("\n")
+ self.output.write("// The Test Runner calls this function once after all the Test Fixtures are run.\n")
+ self.output.write("LONGBOW_TEST_RUNNER_TEARDOWN(%s)\n" % (testRunnerName))
+ self.output.write("{\n")
+ self.output.write(" return LONGBOW_STATUS_SUCCEEDED;\n")
+ self.output.write("}\n")
+ self.output.write("\n")
+ return
+
+ def testRunnerEpilogue(self, sourceFileName, objectFileName, testRunnerName, testFixtures):
+ self.output.write("int\n")
+ self.output.write("main(int argc, char *argv[])\n")
+ self.output.write("{\n")
+ self.output.write(" LongBowTestRunner *testRunner = LONGBOW_TEST_RUNNER_CREATE(%s);\n" % (testRunnerName))
+ self.output.write(" int exitStatus = longBowMain(argc, argv, testRunner, NULL);\n");
+ self.output.write(" longBowTestRunner_Destroy(&testRunner);\n");
+ self.output.write(" exit(exitStatus);\n");
+ self.output.write("}\n")
+ return
+
+ def generateTestFixture(self, testProduction, testFixture):
+ testFixtureName = testFixture["name"]
+
+ sourceFileName = testProduction["files"]["sourceFile"]
+ objectFileName = testProduction["files"]["objectFile"]
+
+ self.testFixturePrologue(sourceFileName, objectFileName, testFixtureName, testFixture["testSuites"])
+
+ for testSuite in testFixture["testSuites"]:
+ self.generateTestSuite(testProduction, testFixture, testSuite)
+ pass
+
+ self.testFixtureEpilogue(testProduction, testFixture, testSuite)
+ return [ testFixtureName ]
+
+ def testFixturePrologue(self, sourceFileName, objectFileName, testFixtureName, testSuites):
+ self.output.write("LONGBOW_TEST_FIXTURE(%s)\n" % (testFixtureName))
+ self.output.write("{\n")
+ for testSuite in testSuites:
+ for testCase in testSuite["testCases"]:
+ self.output.write(" LONGBOW_RUN_TEST_CASE(%s, %s);\n" % (testFixtureName, testCase))
+ pass
+ pass
+ self.output.write("}\n")
+ self.output.write("\n")
+ self.output.write("LONGBOW_TEST_FIXTURE_SETUP(%s)\n" % (testFixtureName))
+ self.output.write("{\n")
+ self.output.write(" return LONGBOW_STATUS_SUCCEEDED;\n")
+ self.output.write("}\n")
+ self.output.write("\n")
+ self.output.write( "LONGBOW_TEST_FIXTURE_TEARDOWN(%s)\n" % (testFixtureName))
+ self.output.write("{\n")
+ self.output.write(" return LONGBOW_STATUS_SUCCEEDED;\n")
+ self.output.write("}\n")
+ self.output.write("\n")
+ return
+
+ def testFixtureEpilogue(self, testProduction, testFixture, testSuite):
+ return
+
+ def generateTestSuite(self, testProduction, testFixture, testSuite):
+ for testCase in testSuite["testCases"]:
+ self.generateTestCase(testProduction, testFixture, testCase)
+ return
+
+ def generateTestCase(self, testProduction, testFixture, testCase):
+ self.output.write("LONGBOW_TEST_CASE(%s, %s)\n" % (testFixture["name"], testCase))
+ self.output.write("{\n")
+ self.output.write(" testUnimplemented(\"\");\n")
+ self.output.write("}\n")
+ self.output.write("\n")
+ return
+
+def getProductionSchema(fileName):
+ '''
+ Get the "production" schema produced by the preprocessor.
+ '''
+ f = open(fileName, "r")
+ text = f.read()
+ f.close()
+ return eval(text)
+
+def canonicalOutputFileName(production):
+ outputFileName = "test_" + sourceFileNameToShortName(production["files"]["sourceFile"]) + ".c"
+ return outputFileName
+
+def canonicalOutput(outputFileName):
+ if outputFileName == None:
+ return sys.stdout
+ open(outputFileName)
+
+if __name__ == '__main__':
+ '''
+@(#) longbow-code @VERSION@ @DATE@
+@(#) All Rights Reserved. Use is subject to license terms.
+ '''
+ outputFileName = None
+
+ if len(sys.argv) != 2:
+ print "Usage: longbow-code file.longbow"
+ sys.exit(1)
+
+ production = getProductionSchema(sys.argv[1])
+
+ if outputFileName == None:
+ outputFileName = canonicalOutputFileName(production)
+
+ if os.path.exists(outputFileName):
+ print "Refusing to overwrite the existing '%s'." % (outputFileName)
+ sys.exit(1)
+
+ outputFile = open(outputFileName, 'w')
+
+ generator = LongBowTestGenerator(outputFile)
+ generator.generateCode(production)
+ pass
diff --git a/longbow/src/python/longbow-complexity-report.py b/longbow/src/python/longbow-complexity-report.py
new file mode 100755
index 00000000..81ddcf72
--- /dev/null
+++ b/longbow/src/python/longbow-complexity-report.py
@@ -0,0 +1,213 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import argparse
+import itertools
+sys.path.append("@INSTALL_PYTHON_DIR@")
+sys.path.append("@DEPENDENCY_PYTHON_DIR@")
+import LongBow
+try:
+ import hfcca
+except ImportError:
+ print "HFCCA not found. You need to download hfcca.py and place it in a location"
+ print "where this script (python) can find it."
+ print "You can find a compatible version of hfcca at: "
+ print " https://headerfile-free-cyclomatic-complexity-analyzer.googlecode.com/files/hfcca.py"
+ print "And place it at: @INSTALL_PYTHON_DIR@"
+ print
+ print "... however, you should have run the ccnx-post-install script"
+ print " (from the ccnx distribution you got this from)"
+ sys.exit(1)
+
+def computeComplexityScore(complexity):
+ score = min(100.0 * abs(1.0 - float(complexity - 5) / 50.0), 100.0)
+ return score
+
+def csvFunctionResult(file, function):
+ score = computeComplexityScore(function.cyclomatic_complexity)
+ string = "complexity,%s,%s,%d,%d,%.2f" % (file.filename, function.name, function.start_line, function.cyclomatic_complexity, score)
+
+ LongBow.scorePrinter([90, 80], score, string)
+ return function.cyclomatic_complexity
+
+def csvFileComplexity(file):
+ score = computeComplexityScore(file.average_CCN)
+ string = "complexity,%s,,,%.2f,%.2f" % (file.filename, file.average_CCN, score)
+ LongBow.scorePrinter([90, 80], score, string)
+ return
+
+def csvFunction(fileInformationList):
+ for fileInformation in fileInformationList:
+ complexities = map(lambda function: csvFunctionResult(fileInformation, function), fileInformation)
+ return
+
+def csvSummary(fileInformationList):
+ map(lambda file: csvFileComplexity(file), fileInformationList)
+ return
+
+
+def textFunctionResult(file, function, maxFileNameLength, maxFunctionNameLength):
+ score = computeComplexityScore(function.cyclomatic_complexity)
+ format = "%-" + str(maxFileNameLength) + "s %-" + str(maxFunctionNameLength) + "s %6d %2d %6.2f"
+ string = format % (file.filename, function.name, function.start_line, function.cyclomatic_complexity, score)
+
+ LongBow.scorePrinter([90, 80], score, string)
+ return function.cyclomatic_complexity
+
+def textFileComplexity(file, maxFileNameLength):
+ score = computeComplexityScore(file.average_CCN)
+ string = ("%-" + str(maxFileNameLength) + "s %6.2f %6.2f") % (file.filename, file.average_CCN, score)
+ LongBow.scorePrinter([90, 80], score, string)
+ return
+
+def computeMaxFileNameLength(fileInformationList):
+ result = 0
+ for fileInformation in fileInformationList:
+ if len(fileInformation.filename) > result:
+ result = len(fileInformation.filename)
+ return result
+
+def computeMaxFunctionNameLength(fileInformationList):
+ result = 0
+ for fileInformation in fileInformationList:
+ if len(fileInformation.filename) > result:
+ result = len(fileInformation.filename)
+ return result
+
+def textFunction(fileInformationList):
+ maxFileNameLength = max(map(lambda fileInformation: len(fileInformation.filename), fileInformationList))
+ maxFunctionNameLength = max(map(lambda fileInformation: max(map(lambda function: len(function.name), fileInformation)), fileInformationList))
+
+ for fileInformation in fileInformationList:
+ complexities = map(lambda function: textFunctionResult(fileInformation, function, maxFileNameLength, maxFunctionNameLength), fileInformation)
+ return
+
+def textSummary(fileInformationList):
+ maxFileNameLength = max(map(lambda fileInformation: len(fileInformation.filename), fileInformationList))
+ map(lambda file: textFileComplexity(file, maxFileNameLength), fileInformationList)
+ return
+#
+# Recompute the file's average complexity as a floating point number.
+def recomputeFileComplexity(fileInformation):
+ complexities = map(lambda function: function.cyclomatic_complexity, fileInformation)
+ if len(complexities) > 0:
+ sum = reduce(lambda sum, complex: sum + complex, complexities)
+ fileInformation.average_CCN = float(sum) / len(fileInformation)
+ else:
+ fileInformation.average_CCN = 0
+ return fileInformation.average_CCN
+
+def recomputeFilesComplexity(fileInformationList):
+ return map(lambda fileInformation: recomputeFileComplexity(fileInformation), fileInformationList)
+
+def computeAverage(fileInformationList):
+ cyclomaticComplexity = map(lambda fileInformation : fileInformation.average_CCN, fileInformationList)
+ sum = reduce(lambda sum, x: sum + x, cyclomaticComplexity)
+ return float(sum) / float(len(cyclomaticComplexity))
+
+def main(argv):
+ desc = '''longbow-complexity-report @VERSION@ @DATE@
+ All Rights Reserved. Use is subject to license terms.
+
+Print the cyclomatic complexity of functions and files.
+
+The option --function displays the file name, function name,
+line number of the function, the cyclomatic complexity and a score ranging from 0 to 100.
+
+The default option --summary displays the file name,
+the average cyclomatic complexity of all functions in the file and
+a score ranging from 0 to 100.
+
+Input is either from a list of files supplied as command line parameters,
+or as a list of newline separated file names read from standard input.
+Output is a plain text (default) or comma-separated-value (CSV).
+
+Examples:
+
+% longbow-complexity-report *.[ch]
+
+Report conformance of the .c and .h files specified as command line parameters.
+
+% longbow-complexity-report -
+Report conformance of the .c and .h files read from standard input, one line per file.
+
+$ longbow-complexity-report parc_JSON.c
+parc_JSON.c 2.27 100.00
+$
+$ echo parc_JSON.c | longbow-complexity-report -o csv -
+complexity,parc_JSON.c,,,2.27,100.00
+$
+'''
+
+ parser = argparse.ArgumentParser(prog='longbow-complexity-report', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)
+ parser.add_argument('-s', '--summary', default=False, action="store_true", help="print the average complexity of each target file.")
+ parser.add_argument('-f', '--function', default=False, action="store_true", help="print the complexity of each function in each target file.")
+ parser.add_argument('-', '--stdin', default=False, action="store_true", required=False, help="read the list of files from standard input rather than the command line.")
+ parser.add_argument('-a', '--average', default=False, action="store_true", required=False, help="display only the simple average of the average complexity of each target file.")
+ parser.add_argument('-o', '--output', default="text", action="store", required=False, type=str, help="the output format: \"text\" or \"csv\"")
+ parser.add_argument("files", help="Files to check", nargs="*")
+
+ args = parser.parse_args()
+
+ targets = []
+
+ if args.stdin:
+ for line in sys.stdin:
+ t = line.strip()
+ if (len(t) > 0):
+ targets.append(t)
+ else:
+ targets = args.files
+
+ if (len(targets) == 0):
+ print >> sys.stderr, "Error: No files to analyze. See %s -h" % (sys.argv[0])
+ sys.exit(1)
+
+ # If nothing was specified, print the summary as a default
+ if args.summary == False and args.function == False and args.average == False:
+ args.summary = True
+
+ options, arguments = hfcca.createHfccaCommandLineParser().parse_args(args=[argv[0]])
+ result = hfcca.analyze(targets, options)
+
+ # Convert from that iterator to a simple list...
+ fileInformationList = map(lambda x : x, result)
+
+ recomputeFilesComplexity(fileInformationList)
+
+ if args.function:
+ if args.output == "text":
+ textFunction(fileInformationList)
+ else:
+ csvFunction(fileInformationList)
+
+ if args.summary:
+ if args.output == "text":
+ textSummary(fileInformationList)
+ else:
+ csvSummary(fileInformationList)
+
+ if args.average:
+ print "%.2f" % computeAverage(fileInformationList)
+
+if __name__ == "__main__":
+ '''
+@(#) longbow-complexity-report @VERSION@ @DATE@
+@(#) All Rights Reserved. Use is subject to license terms.
+'''
+ main(sys.argv)
diff --git a/longbow/src/python/longbow-coverage-report.py b/longbow/src/python/longbow-coverage-report.py
new file mode 100755
index 00000000..4a0a86ab
--- /dev/null
+++ b/longbow/src/python/longbow-coverage-report.py
@@ -0,0 +1,87 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import sys
+import argparse
+
+sys.path.append("@INSTALL_PYTHON_DIR@")
+sys.path.append("@DEPENDENCY_PYTHON_DIR@")
+sys.path.append("../site-packages/longbow/")
+import CoverageReport
+
+
+if __name__ == '__main__':
+ '''
+@(#) longbow-coverage-report @VERSION@ @DATE@
+@(#) All Rights Reserved. Use is subject to license terms.
+'''
+ description = '''
+longbow-coverage-report @VERSION@ @DATE@
+All Rights Reserved. Use is subject to license terms.
+
+Report on the code coverage of tests.
+
+The source files or executables to analyse are supplied as command line parameters,
+or as a list of newline separated file names read from standard input.
+
+Output is plain-text (default --output text) or a CSV file (--output csv)
+reporting the results.
+
+Results are:
+ An average of all files specified (--average)
+ A one line summary of all files specified (--summary)
+ A listing of the original source file, colorized showing tested and non-tested lines.
+ '''
+ parser = argparse.ArgumentParser(prog='longbow-coverage-report',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description=description)
+ parser.add_argument('-', '--stdin', default=False, action="store_true", required=False,
+ help="Read the list of files from standard input.")
+ parser.add_argument('-s', '--summary', default=False, action="store_true", required=False,
+ help="Display the score for each file (excluding test source files).")
+ parser.add_argument('-a', '--average', default=False, action="store_true", required=False,
+ help="Display the average score for all C source files (excluding test source files).")
+ parser.add_argument('-o', '--output', default="text", action="store", required=False, type=str,
+ help="Set the output format: \"text\" or \"csv\"")
+ parser.add_argument('-v', '--visual', default=False, action="store_true", required=False,
+ help="Colorize the original source code showing coverage")
+ parser.add_argument('-x', '--explain', default=False, action="store_true", required=False,
+ help="Display information about the collection of coverage information (guru mode).")
+ parser.add_argument('-d', '--distribution', default="[95, 90]", action="store", required=False, type=str,
+ help="A list containing the score distributions for pretty-printing. Default [95, 90]")
+ parser.add_argument('-T', '--includeTestSources', default=False, action="store_true", required=False,
+ help="Include analysis of the test sources. Default False")
+ parser.add_argument('-t', '--testDir', default="", action="store", required=False, type=str,
+ help="Directory hint for locating test files.")
+
+ parser.add_argument("files", help="Files to check", nargs="*")
+
+ args = parser.parse_args()
+
+ if not args.summary and not args.average and not args.visual and not args.explain:
+ args.summary = True
+
+ fileNames = []
+
+ if args.stdin:
+ for line in sys.stdin:
+ t = line.strip()
+ if len(t) > 0:
+ fileNames.append(t)
+ else:
+ fileNames = args.files
+
+ CoverageReport.commandLineMain(args, fileNames, args.testDir)
diff --git a/longbow/src/python/longbow-doxygen-report.py b/longbow/src/python/longbow-doxygen-report.py
new file mode 100755
index 00000000..1b303f91
--- /dev/null
+++ b/longbow/src/python/longbow-doxygen-report.py
@@ -0,0 +1,166 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import os
+import pprint
+import subprocess
+import difflib
+import csv
+import argparse
+sys.path.append("@INSTALL_PYTHON_DIR@")
+sys.path.append("@DEPENDENCY_PYTHON_DIR@")
+import LongBow
+
+def concatenateContinuationLines(lines):
+ '''
+ Parse doxygen log lines.
+ Lines that are indented by a space are continutations of the previous line.
+ '''
+ result = list()
+ accumulator = ""
+ for line in lines:
+ line = line.rstrip()
+ if line.startswith(" ") == False and line.startswith(" ") == False:
+ if len(accumulator) > 0:
+ result.append(accumulator)
+ accumulator = line
+ else:
+ accumulator = accumulator + " " + line.lstrip()
+
+ result.append(accumulator)
+
+ return result
+
+def parseLine(line):
+    '''
+    Parse one canonicalized doxygen log line into a dict with keys
+    fileName, lineNumber, type, severity, and message; return None for
+    lines that are not file diagnostics.
+    '''
+    result = None
+    if not line.startswith("<"):
+        fields = line.split(":")
+        if len(fields) >= 4:
+            # assumes "file:line:severity:message" layout -- TODO confirm
+            # against the doxygen version in use.  int() raises ValueError
+            # if the second field is not numeric.
+            # NOTE(review): joining fields[3:] with " " turns any ":" inside
+            # the message text into a space; ":".join() may have been intended.
+            result = { "fileName" : fields[0].strip(),
+                       "lineNumber" : int(fields[1].strip()),
+                       "type" : "documentation",
+                       "severity" : fields[2].strip(),
+                       "message" : " ".join(fields[3:]).strip()}
+        elif line.startswith("error"):
+            print line
+        elif len(line) > 0:
+            print "Consider using doxygen -s:", line
+
+    return result
+
+def canonicalize(lines):
+    '''
+    Convert raw doxygen log lines into a list of parsed entry dicts:
+    join continuation lines, parse each line, and drop unparsable ones.
+    (Python 2: map and filter return lists here.)
+    '''
+    lines = concatenateContinuationLines(lines)
+    parsedLines = map(lambda line: parseLine(line), lines)
+    parsedLines = filter(lambda line: line != None, parsedLines)
+    return parsedLines
+
+def organize(entries):
+    '''
+    Group parsed log entries into a dict keyed by file name; each value is
+    a dict keyed by line number (as a string) holding the list of distinct
+    entries reported for that line.
+    '''
+    result = dict()
+
+    for entry in entries:
+        if not entry["fileName"] in result:
+            result[entry["fileName"]] = dict()
+
+        entryByFile = result[entry["fileName"]]
+
+        if not str(entry["lineNumber"]) in entryByFile:
+            entryByFile[str(entry["lineNumber"])] = list()
+        # Avoid recording duplicate diagnostics for the same line.
+        if not entry in entryByFile[str(entry["lineNumber"])]:
+            entryByFile[str(entry["lineNumber"])].append(entry)
+
+    return result
+
+def textualSummary(distribution, documentation):
+    '''
+    Print one line per file: name, total line count, number of lines with
+    documentation diagnostics, and the resulting percentage score.
+    '''
+    # Width of the longest file name, used to align the columns.
+    maxWidth = 0
+    for entry in documentation:
+        if len(entry) > maxWidth:
+            maxWidth = len(entry)
+
+    formatString ="%-" + str(maxWidth) + "s %8d %8d %.2f%%"
+    for entry in documentation:
+        # badLines counts the distinct flagged line numbers for this file.
+        badLines = len(documentation[entry])
+        totalLines = LongBow.countLines(entry)
+        # NOTE(review): divides by totalLines -- a zero-length file would
+        # raise ZeroDivisionError.
+        score = float(totalLines - badLines) / float(totalLines) * 100.0
+        LongBow.scorePrinter(distribution, score, formatString % (entry, totalLines, badLines, score))
+    return
+
+def textualAverage(distribution, documentation, format):
+    '''
+    Print the average documentation score across all files, rendered with
+    the supplied printf-style format string.  An empty result set scores
+    100.0 by definition.
+    '''
+    sum = 0.0
+
+    for entry in documentation:
+        badLines = len(documentation[entry])
+        totalLines = LongBow.countLines(entry)
+        score = float(totalLines - badLines) / float(totalLines) * 100.0
+        sum = sum + score
+
+    if len(documentation) == 0:
+        averageScore = 100.0
+    else:
+        averageScore = sum / float(len(documentation))
+
+    LongBow.scorePrinter(distribution, averageScore, format % averageScore)
+
+def csvSummary(distribution, documentation):
+    '''
+    Print one CSV record per file:
+    "documentation,<file>,<totalLines>,<badLines>,<score>%".
+    '''
+    formatString ="documentation,%s,%d,%d,%.2f%%"
+    for entry in documentation:
+        badLines = len(documentation[entry])
+        totalLines = LongBow.countLines(entry)
+        score = float(totalLines - badLines) / float(totalLines) * 100.0
+        LongBow.scorePrinter(distribution, score, formatString % (entry, totalLines, badLines, score))
+    return
+
+def main(argv):
+    '''
+    Parse command line arguments, read and organize the doxygen log, and
+    emit the requested summary and/or average report.
+    '''
+    parser = argparse.ArgumentParser(prog='longbow-doxygen-report', formatter_class=argparse.RawDescriptionHelpFormatter, description="")
+    parser.add_argument('-l', '--doxygenlog', default=False, action="store", required=True, type=str, help="The doxygen output log to use.")
+    parser.add_argument('-s', '--summary', default=False, action="store_true", required=False, help="Produce the score for each file")
+    parser.add_argument('-a', '--average', default=False, action="store_true", required=False, help="Produce the simple average of all scores.")
+    parser.add_argument('-d', '--distribution', default="[100, 95]", action="store", required=False, type=str, help="A list containing the score distributions for pretty-printing")
+    parser.add_argument('-o', '--output', default="text", action="store", required=False, type=str, help="The required output format. text, csv")
+
+    args = parser.parse_args()
+
+    # With no report selected, default to the per-file summary.
+    if not args.summary and not args.average:
+        args.summary = True
+
+    with open(args.doxygenlog, 'r') as f:
+        lines = f.readlines()
+
+    lines = canonicalize(lines)
+
+    result = organize(lines)
+
+    pp = pprint.PrettyPrinter(indent=4)
+    #pp.pprint(result)
+
+    # NOTE(review): eval() of a command-line string; tolerable in a trusted
+    # developer tool, but ast.literal_eval would be safer.
+    distribution = eval(args.distribution)
+    if args.summary:
+        if args.output == "text":
+            textualSummary(distribution, result)
+        else:
+            csvSummary(distribution, result)
+
+    if args.average:
+        textualAverage(distribution, result, "%.2f")
+
+
+if __name__ == '__main__':
+    # The bare string below is an embedded what(1) identification string;
+    # the placeholders are substituted at build time.
+    '''
+@(#) longbow-doxygen-report @VERSION@ @DATE@
+@(#) All Rights Reserved. Use is subject to license terms.
+    '''
+    main(sys.argv)
diff --git a/longbow/src/python/longbow-generate-about.py b/longbow/src/python/longbow-generate-about.py
new file mode 100755
index 00000000..437102a3
--- /dev/null
+++ b/longbow/src/python/longbow-generate-about.py
@@ -0,0 +1,289 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import os
+import sys
+import string
+import datetime
+import argparse
+sys.path.append("@INSTALL_PYTHON_DIR@")
+sys.path.append("@DEPENDENCY_PYTHON_DIR@")
+import FileUtil
+
+# SCCS what(1) identification marker embedded in generated strings.
+whatLineToken = "@(#)"
+
+def translateCCharacter(character):
+    '''
+    Map a single character to its escaped form for inclusion inside a C
+    string literal; characters needing no escape are returned unchanged.
+    '''
+    result = character
+
+    if character == '\t':
+        result = "\\t"
+    elif character == "\n":
+        result = "\\n"
+    elif character == "\"":
+        result = "\\\""
+    elif character == "\'":
+        result = "\\'"
+
+    return result
+
+
+def makeWhatLine(line):
+ return "@(#)" + line
+
+def createCString(string):
+    '''
+    Return the C-escaped form of the given Python string, or the literal
+    text "None" when the input is None.
+    '''
+    if string is None:
+        result = "None"
+    else:
+        result = "".join(map(lambda character: translateCCharacter(character), string))
+    return result
+
+def createQuotedCString(string):
+    '''
+    Return the C-escaped form of the given string wrapped in double quotes.
+    '''
+    return "\"%s\"" % createCString(string)
+
+def cIdentifier(name):
+    '''
+    Convert a name into a valid C identifier by replacing punctuation
+    characters with underscores.  (Python 2: string.maketrans/str.translate.)
+    '''
+    translation = string.maketrans("-!@#$%^&*()_-+=[]{}|;:<>,./?", "____________________________")
+    return name.translate(translation)
+
+
+def validateArgument(arg):
+    '''
+    If the given parameter is equal to '-' return None, otherwise return the parameter.
+    '''
+    # '-' is the conventional command-line placeholder for "not supplied".
+    if arg == "-":
+        return None
+    return arg
+
+
+class LongBowGenerateAboutHFile:
+ def __init__(self, prefix):
+ self.prefix = prefix
+ return
+
+ def headerdocFunction(self, functionName, OneLineDescription, returns):
+ result = "/**\n"
+ result += " * %s\n" % OneLineDescription
+ result += " *\n"
+ result += " * @return %s\n" % returns
+ result += " */\n"
+ return result
+
+ def FileName(self):
+ return self.prefix + "_About.h"
+
+ def Name(self):
+ functionName = "%sAbout_Name" % self.prefix
+ result = self.headerdocFunction(functionName, "Return the name as a C string.", "The name as a C string.")
+ result += "const char *%s(void);\n" % functionName
+ return result
+
+ def Version(self):
+ functionName = "%sAbout_Version" % self.prefix
+ result = self.headerdocFunction(functionName, "Return the version as a C string.", "The version as a C string.")
+ result += "const char *%s(void);\n" % functionName
+ return result
+
+ def About(self):
+ functionName = "%sAbout_About" % self.prefix
+ result = self.headerdocFunction(functionName, "Return the About text as a C string.", "The About text as a C string.")
+ result += "const char *%s(void);\n" % functionName
+ return result
+
+ def MiniNotice(self):
+ functionName = "%sAbout_MiniNotice" % self.prefix
+ result = self.headerdocFunction(functionName,
+ "Return the minimum copyright notice as a C string.",
+ "The minimum copyright notice as a C string.")
+ result += "const char *%s(void);\n" % functionName
+ return result
+
+ def ShortNotice(self):
+ functionName = "%sAbout_ShortNotice" % self.prefix
+ result = self.headerdocFunction(functionName,
+ "Return the short copyright notice as a C string.",
+ "The short copyright notice as a C string.")
+ result += "const char *%s(void);\n" % functionName
+ return result
+
+ def LongNotice(self):
+ functionName = "%sAbout_LongNotice" % self.prefix
+ result = self.headerdocFunction(functionName,
+ "Return the long copyright notice as a C string.",
+ "The long copyright notice as a C string.")
+ result += "const char *%s(void);\n" % functionName
+ return result
+
+ def WhatString(self):
+ result = "/**\n"
+ result += " * Embedded string containing information for the what(1) command.\n"
+ result += " *\n"
+ result += " */\n"
+ result += "extern const char *%s_What;\n" % (self.prefix)
+ return result
+
+ def __str__(self):
+ result = "// DO NOT EDIT THIS FILE. IT IS AUTOMATICALLY GENERATED.\n"
+ result += "// longbow-generate-about @VERSION@ @DATE@\n\n"
+ result += "#ifndef %s_About_h\n" % (self.prefix)
+ result += "#define %s_About_h\n" % (cIdentifier(self.prefix))
+ result += self.WhatString() + "\n"
+ result += self.Name() + "\n"
+ result += self.Version() + "\n"
+ result += self.About() + "\n"
+ result += self.MiniNotice() + "\n"
+ result += self.ShortNotice() + "\n"
+ result += self.LongNotice() + "\n"
+ result += "#endif // %s_About_h\n" % (cIdentifier(self.prefix))
+ return result
+
+ def writeFile(self):
+ with open(self.FileName(), "w") as myfile:
+ myfile.write(str(self))
+ return
+
+class LongBowGenerateAboutCFile:
+    '''
+    Generate the <prefix>_About.c implementation of the About contract:
+    the what(1) string constant plus one accessor per notice/metadata item.
+    '''
+    def __init__(self, args):
+        self.prefix = args.prefix
+        self.name = args.name
+        self.version = validateArgument(args.version)
+        self.miniNotice = ""
+        self.shortNotice = ""
+        self.longNotice = ""
+        self.about = None
+        self.what = None
+
+        self.args = args
+
+        # Notice texts are read from the files named on the command line.
+        self.miniNotice = FileUtil.readFileString(args.miniNotice)
+        self.shortNotice = FileUtil.readFileString(args.shortNotice)
+        self.longNotice = FileUtil.readFileString(args.longNotice)
+
+        self.buildDate = datetime.datetime.utcnow().isoformat()
+
+        # A version of '-' (mapped to None by validateArgument) means "use the
+        # RELEASE_VERSION macro"; the bare token is spliced between the
+        # generated C string literals below.
+        if self.version == None:
+            self.version = " RELEASE_VERSION "
+
+        # The About text: adjacent C string literals concatenated by the C compiler.
+        if self.about == None:
+            self.about = createQuotedCString("%s " % (self.name)) + \
+                         self.version + \
+                         createQuotedCString(" %s" % (self.buildDate)) + " " + \
+                         createQuotedCString("\n%s" % (self.miniNotice))
+
+        if self.what == None:
+            # Indent each notice line with a tab; the final element after
+            # split("\n") is dropped (assumes the notice ends with a newline
+            # -- TODO confirm for notice files without a trailing newline).
+            if self.miniNotice != None:
+                notice = "\n".join(map(lambda line: "\t" + line, self.miniNotice.split("\n")[:-1]))
+            else:
+                notice = ""
+            self.what = createQuotedCString(whatLineToken) + " " + \
+                        createQuotedCString(self.name + " ") + " " + \
+                        self.version + " " + \
+                        createQuotedCString(" " + self.buildDate) + "\n" + \
+                        createQuotedCString(whatLineToken) + " " + \
+                        createQuotedCString(notice)
+        return
+
+    def FileName(self):
+        # The implementation file name to generate.
+        return self.prefix + "_About.c"
+
+    def Name(self):
+        functionName = "%sAbout_Name" % self.prefix
+        return self.boilerPlateFunction(functionName, createQuotedCString(self.name))
+
+    def Version(self):
+        functionName = "%sAbout_Version" % self.prefix
+        return self.boilerPlateFunction(functionName, self.version)
+
+    def About(self):
+        functionName = "%sAbout_About" % self.prefix
+        return self.boilerPlateFunction(functionName, self.about)
+
+    def MiniNotice(self):
+        functionName = "%sAbout_MiniNotice" % self.prefix
+        return self.boilerPlateFunction(functionName, createQuotedCString(self.miniNotice))
+
+    def ShortNotice(self):
+        functionName = "%sAbout_ShortNotice" % self.prefix
+        return self.boilerPlateFunction(functionName, createQuotedCString(self.shortNotice))
+
+    def LongNotice(self):
+        functionName = "%sAbout_LongNotice" % self.prefix
+        return self.boilerPlateFunction(functionName, createQuotedCString(self.longNotice))
+
+    def WhatString(self):
+        # Definition of the extern what(1) string declared in the header.
+        return "const char *%s_What = %s;\n" % (self.prefix, self.what)
+
+    def boilerPlateFunction(self, functionName, string):
+        # A trivial C accessor that returns the given expression text verbatim.
+        result = "const char *\n%s(void)\n" % functionName
+        result += "{\n"
+        result += " return %s;\n" % string
+        result += "}\n"
+        return result
+
+    def __str__(self):
+        result = "// DO NOT EDIT THIS FILE. IT IS AUTOMATICALLY GENERATED.\n"
+        result += "// longbow-generate-about @VERSION@ @DATE@\n\n"
+        result += "#include \"%s_About.h\"\n\n" % self.prefix
+        result += self.WhatString() + "\n"
+        result += self.Name() + "\n"
+        result += self.Version() + "\n"
+        result += self.About() + "\n"
+        result += self.MiniNotice() + "\n"
+        result += self.ShortNotice() + "\n"
+        result += self.LongNotice() + "\n"
+        return result
+
+    def writeFile(self):
+        # Emit the rendered implementation to <prefix>_About.c.
+        with open(self.FileName(), "w") as myfile:
+            myfile.write(str(self))
+        return
+
+if __name__ == '__main__':
+    desc = '''
+@(#) longbow-generate-about @VERSION@ @DATE@
+@(#) All Rights Reserved. Use is subject to license terms.
+
+Generate C code conforming to the About contract.
+
+Create a .c and .h file pair with the specified prefix.
+For the prefix 'xyzzy', the file names are 'xyzzy_About.c' and 'xyzzy_About.h' respectively.
+
+The functions defined are:
+
+const char *xyzzyAbout_Name(void)
+const char *xyzzyAbout_Version(void)
+const char *xyzzyAbout_About(void)
+const char *xyzzyAbout_MiniNotice(void)
+const char *xyzzyAbout_ShortNotice(void)
+const char *xyzzyAbout_LongNotice(void)
+
+And the constant string const char *xyzzy_What;
+    '''
+
+    parser = argparse.ArgumentParser(prog='longbow-generate-about', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)
+
+    parser.add_argument("prefix", help="The file name and function name prefix.")
+    parser.add_argument("name", help="The name of the entity this is about.")
+    parser.add_argument("version", help="The version of the entity this is about.")
+    parser.add_argument("miniNotice", help="The name of the file containing the smallest copyright or attribution notice.")
+    parser.add_argument("shortNotice", help="The name of the file containing a short copyright or attribution notice.")
+    parser.add_argument("longNotice", help="The name of the file containing a full copyright or attribution notice.")
+
+    args = parser.parse_args()
+
+    # Generate the header first, then the matching implementation file.
+    hfile = LongBowGenerateAboutHFile(args.prefix)
+    hfile.writeFile()
+
+    cfile = LongBowGenerateAboutCFile(args)
+    cfile.writeFile()
diff --git a/longbow/src/python/longbow-name-report.py b/longbow/src/python/longbow-name-report.py
new file mode 100755
index 00000000..3daca949
--- /dev/null
+++ b/longbow/src/python/longbow-name-report.py
@@ -0,0 +1,91 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import os
+import sys
+import argparse
+
+sys.path.append("../site-packages/longbow/")
+sys.path.append("@INSTALL_PYTHON_DIR@")
+import NameReport
+
+if __name__ == '__main__':
+ '''
+ @(#) name-report @VERSION@ @DATE@
+ @(#) All Rights Reserved. Use is subject to license terms.
+ '''
+ desc = '''
+Print a score representing the percentage of compliance with the naming conventions for one or more C source and object files.
+
+$ ./longbow-name-report parc_Object.c
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-names 100.00 enum-names 100.0 typedef-names 100.0
+$
+$ echo parc_Object.c | ./parc-name-grade -
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-names 100.00 enum-names 100.0 typedef-names 100.0
+$
+
+Default Output (--summary):
+namespace, module-name[, topic, score]
+
+namespace: Namespace of the file, like `parc`
+module-name: The name of the file, like `parc_ArrayList.c`
+topic: The name of the topic: valid-name, function-name-conformance, or enum-name-conformance
+score: A context-sensitive value related to the topic (valid-name: True/False, function/enum-name-conformance: 0-100)
+
+Finegrain Output (--finegrain):
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-names 100.00 enum-names 100.0 typedef-names 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_Acquire 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_AssertValid 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_Compare 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_Copy 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_Create 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_Display 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_Equals 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_GetReferenceCount 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_HashCode 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_Release 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_TestAcquireContractRaw 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object function-name parcObject_ToJSON 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/LibpgetEnumerationsFromFilesarc/parc/algol parc_Object function-name parcObject_ToString 100.0
+/Users/cwood/Projects/DistilleryBranches/Case1073/Libparc/parc/algol parc_Object typedef-name _ObjectHeader 100.0
+'''
+
+ parser = argparse.ArgumentParser(prog='longbow-name-report', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)
+ parser.add_argument('-a', '--average', default=False, action="store_true", help="Print an average summary of the naming conformance results for all modules")
+ parser.add_argument('-s', '--summary', default=False, action="store_true", help="Print a summary of the naming conformance results for each module")
+ parser.add_argument('-f', '--finegrain', default=False, action="store_true", help="Print the individual results for each function, typedef, and enumeration in each module.")
+ parser.add_argument('-o', '--output', default="text", action="store", required=False, type=str, help="the output format: \"text\" or \"csv\"")
+ parser.add_argument('-d', '--distribution', default="[99, 90]", action="store", required=False, type=str, help="a list containing the score distributions for pretty-printing. Default [99, 90]")
+ parser.add_argument('-t', '--trace', default=False, action="store_true", help="Turn on exception tracing to debug an issue with the tool.")
+ parser.add_argument('-', '--stdin', default=False, action="store_true", required=False, help="Read the list of files from standard input.")
+ parser.add_argument('-p', '--opath', default="", action="store", required=False, type=str, help="Specify the path for object files, can be a path to a static library.")
+ parser.add_argument("files", help="Files to check", nargs="*")
+
+ args = parser.parse_args()
+
+ targets = []
+ if args.stdin:
+ for line in sys.stdin:
+ targets.append(line.strip())
+ else:
+ targets = args.files
+
+ if (len(targets) == 0):
+ parser.print_usage()
+ sys.exit(1)
+
+ NameReport.commandLineMain(args, targets, args.opath)
diff --git a/longbow/src/python/longbow-preprocess.py b/longbow/src/python/longbow-preprocess.py
new file mode 100755
index 00000000..12c01c2a
--- /dev/null
+++ b/longbow/src/python/longbow-preprocess.py
@@ -0,0 +1,153 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import os
+import subprocess
+import pprint
+
+def sourceFileNameToName(sourceFileName):
+ '''
+ Given the path to a source file, return the name without any path components or trailing suffix.
+ '''
+ name = os.path.basename(sourceFileName)
+ return name.split(".")[0]
+
+def canonicalizeFunctionName(functionName):
+ '''
+ Given a function name that contains the initial '_' character,
+ strip it and return a canonicalised form of the same name suitable for a source file.
+ '''
+ if functionName[0] == "_":
+ functionName = functionName[1:]
+ return functionName
+
+def isReservedName(functionName):
+ '''
+ Given a canonicalized name, determine if it is a reserved name according to ISO/IEC 9899:2011 and ANSI Sec. 4.1.2.1,
+ identifiers that begin with an underscore and either an uppercase letter or another underscore are always reserved for any use.
+ '''
+ if functionName[0] == '_' and functionName[1] == '_':
+ return True
+ elif functionName[0] == '_' and functionName[1].isupper():
+ return True
+
+ return False
+
+
+def getDarwinTestableFunctions(sourceFileName, objectFileName):
+    '''
+    Extract testable function names from an object file using the Darwin
+    (macOS) nm(1) tool, partitioned into externally-visible and file-static
+    symbols.  Returns { "Static": [...], "Global": [...] }, each list sorted.
+    NOTE(review): sourceFileName is currently unused.
+    '''
+    # -U hides undefined symbols; -m prints the Mach-O segment/section used
+    # below to select __TEXT,__text (code) symbols.
+    command = [ "/usr/bin/nm", "-Um", objectFileName ]
+
+    output = subprocess.check_output(command)
+    lines = output.splitlines()
+
+    external = []
+    internal = []
+    for line in lines:
+        fields = line.split(" ")
+        # assumes "nm -m" rows of the form:
+        #   <addr> (__TEXT,__text) external _name
+        # -- TODO confirm against the nm version in use.
+        if fields[1] == "(__TEXT,__text)":
+            functionName = canonicalizeFunctionName(fields[3])
+
+            # Reserved identifiers cannot be exercised from test code.
+            if isReservedName(functionName):
+                print "Ignoring function with a ISO/IEC 9899:2011 and ANSI Sec. 4.1.2.1 reserved name: ", functionName
+            else:
+                if fields[2] == "external":
+                    external.append( ( functionName ) )
+                else:
+                    internal.append( ( functionName ) )
+                pass
+            pass
+        pass
+
+    external.sort()
+    internal.sort()
+    return { "Static": internal, "Global" : external }
+
+def testCases(functionList):
+    '''
+    Wrap a list of function names as a Test Case dictionary.
+    '''
+    return { "testCases" : functionList }
+
+def testSuite(testCases):
+    '''
+    A Test Suite is comprised of one or more Test Cases.
+    Returns None when there are no test cases to wrap.
+    '''
+    if testCases == None or len(testCases) == 0:
+        return None
+    return [ testCases ]
+
+def testFixture(testFixtureName, testSuites):
+    '''
+    A Test Fixture contains an initial setup function, one or more Test Suites, and a final tear-down function.
+    Returns None when there are no test suites.
+    '''
+    if testSuites == None:
+        return None
+    return { "name" : testFixtureName, "testSuites" : testSuites }
+
+def testRunner(testRunnerName, files, testFixtures):
+    '''
+    A Test Runner contains one or more Test Fixtures.
+    Fixtures that are None (i.e. empty) are dropped.
+    '''
+    testFixtures = [x for x in testFixtures if x is not None]
+    return { "name" : testRunnerName, "files" : files, "testFixtures" : testFixtures }
+
+def computeFileNames(argv):
+    """ Given an argument list, compute the file names to use for code generation.
+
+    Returns a tuple (sourceFileName, objectFileName, outputFileName).
+    If argv[1] ends in ".c" the object file is taken from argv[2]; otherwise
+    argv[1] is treated as a prefix for both the .c and .o names.
+    NOTE(review): the ".c" branch assumes argv[2] exists -- an IndexError
+    results when only a source file is supplied; confirm intended usage.
+    """
+    if (argv[1].endswith(".c")):
+        return (argv[1], argv[2], sourceFileNameToName(argv[1]) + ".longbow")
+
+    return (argv[1]+".c", argv[1]+".o", sourceFileNameToName(argv[1]) + ".longbow")
+
+if __name__ == '__main__':
+    '''
+    @(#) longbow-preprocess @VERSION@ @DATE@
+    @(#) All Rights Reserved. Use is subject to license terms.
+'''
+    if len(sys.argv) <= 1:
+        print "Usage: longbow-preprocess (sourceFileName objectFileName) | (fileNamePrefix)"
+        print
+        print "Generate a plain-text intermediate form for a LongBow test case generated from"
+        print "a specified source and object file. Use longbow-code to produce a LongBow"
+        print "test runner based upon the intermediate form."
+        sys.exit(1)
+
+    fileNames = computeFileNames(sys.argv)
+
+    # Unpack the (source, object, output) names computed from the command line.
+    sourceFileName = fileNames[0]
+    objectFileName = fileNames[1]
+    outputFileName = fileNames[2]
+
+    functionDictionary = getDarwinTestableFunctions(sourceFileName, objectFileName)
+
+    testRunnerName = sourceFileNameToName(sourceFileName)
+
+    # Python-2-only tuple-parameter lambda: builds one fixture per symbol
+    # visibility class ("Static", "Global").
+    testFixtures = map(lambda(fixtureType):
+                       testFixture(fixtureType, testSuite(testCases(functionDictionary[fixtureType]))), functionDictionary)
+
+    files = { "sourceFile" : sourceFileName, "objectFile" : objectFileName }
+    result = testRunner(testRunnerName, files, testFixtures)
+
+    # Write the pretty-printed intermediate form, later consumed by longbow-code.
+    out = open(outputFileName, "w")
+    pp = pprint.PrettyPrinter(indent=4, width=132, depth=None, stream=out)
+    pp.pprint(result)
+    out.close()
+    pass
diff --git a/longbow/src/python/longbow-size-report.py b/longbow/src/python/longbow-size-report.py
new file mode 100755
index 00000000..fd4ae76c
--- /dev/null
+++ b/longbow/src/python/longbow-size-report.py
@@ -0,0 +1,131 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import os
+import subprocess
+import difflib
+import csv
+import argparse
+sys.path.append("@INSTALL_PYTHON_DIR@")
+import LongBow
+from pprint import pprint
+
+def textOutputFile(file, maximumFileNameLength):
+    '''
+    Print one "name lineCount" row, left-padding the name to align columns.
+    file is a [fileName, lineCount] pair.
+    '''
+    format = "%-" + str(maximumFileNameLength) + "s %6d"
+    print format % (file[0], file[1])
+    return
+
+def textSummary(files):
+    '''
+    Print an aligned "name lineCount" row for every [fileName, lineCount] pair.
+    (Python 2: map is used purely for its printing side effect.)
+    '''
+    maximumFileNameLength = max(map(lambda file: len(file[0]), files))
+    map(lambda file: textOutputFile(file, maximumFileNameLength), files)
+    return
+
+def csvOutputFile(file):
+    '''
+    Print one CSV record: "size,<fileName>,<lineCount>".
+    '''
+    format = "size,%s,%d"
+    print format % (file[0], file[1])
+    return
+
+def csvSummary(files):
+    '''
+    Print a CSV record for every [fileName, lineCount] pair.
+    '''
+    map(lambda file: csvOutputFile(file), files)
+    return
+
+def textTotal(files):
+    '''
+    Print the total line count across all [fileName, lineCount] pairs.
+    '''
+    total = sum(map(lambda file: file[1], files))
+    print total
+    return
+
+def csvTotal(files):
+    '''
+    Print the total line count across all [fileName, lineCount] pairs.
+    (Currently identical to textTotal; kept separate for per-format dispatch.)
+    '''
+    total = sum(map(lambda file: file[1], files))
+    print total
+    return
+
+def main():
+ desc = '''
+Report on number of lines of one or more C source or header files.
+
+Input is either from a list of files supplied as command line parameters,
+or as a list of newline separated file names read from standard input.
+Output is a plain text (default) or a CSV file reporting
+the file name and the total number of lines in the file.
+
+Usage:
+
+% longbow-size-report *.[ch]
+
+Report the number of lines in .c and .h files specified as command line parameters.
+
+% longbow-size-report -
+Read the lists of files from standard input, one file per line.
+
+$ longbow-size-report parc_JSON.c
+parc_JSON.c 239
+$
+$
+$ echo parc_JSON.c | longbow-size-report -o csv -
+parc_JSON.c,239
+$
+'''
+
+ parser = argparse.ArgumentParser(prog='longbow-size-report', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)
+ parser.add_argument('-', '--stdin', default=False, action="store_true", required=False, help="read the list of files from standard input.")
+ parser.add_argument('-s', '--summary', default=False, action="store_true", required=False, help="display the number of lines for each file")
+ parser.add_argument('-t', '--total', default=False, action="store_true", required=False, help="display the total number of lines for all files")
+ parser.add_argument('-o', '--output', default="text", action="store", required=False, type=str, help="the output format: \"text\" or \"csv\"")
+
+ parser.add_argument("files", help="Files to check", nargs="*")
+
+ args = parser.parse_args()
+
+ if args.summary == False and args.total == False:
+ args.summary = True
+
+ targets = []
+
+ if args.stdin:
+ for line in sys.stdin:
+ t = line.strip()
+ if len(t) > 0:
+ targets.append(t)
+ else:
+ targets = args.files
+
+ if len(targets) == 0:
+ parser.print_usage()
+ sys.exit(1)
+
+ files = map(lambda fileName: [ fileName, LongBow.countLines(fileName)], targets)
+ total = sum(map(lambda element: element[1], files))
+
+ if args.summary:
+ if args.output == "text":
+ textSummary(files)
+ else:
+ csvSummary(files)
+
+ if args.total:
+ if args.output == "text":
+ textTotal(files)
+ else:
+ csvTotal(files)
+
+if __name__ == '__main__':
+    # The bare string below is an embedded what(1) identification string.
+    '''
+    @(#) longbow-size-report @VERSION@ @DATE@
+    @(#) All Rights Reserved. Use is subject to license terms.
+    '''
+    main()
diff --git a/longbow/src/python/longbow-style-report.py b/longbow/src/python/longbow-style-report.py
new file mode 100755
index 00000000..05b8fbde
--- /dev/null
+++ b/longbow/src/python/longbow-style-report.py
@@ -0,0 +1,99 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import argparse
+
+sys.path.append("@INSTALL_PYTHON_DIR@")
+sys.path.append("@DEPENDENCY_PYTHON_DIR@")
+sys.path.append("../site-packages/longbow/")
+import StyleReport
+
+if __name__ == '__main__':
+ '''
+@(#) longbow-code @VERSION@ @DATE@
+ @(#) All Rights Reserved. Use is subject to license terms.
+'''
+ desc = '''
+Report on style conformance for one or more C source or header files.
+
+Input is either from a list of files supplied as command line parameters,
+or as a list of newline separated file names read from standard input.
+Reports are:
+--summary A one line per file report of the file name, number of lines, number of non-compliant lines, and a score.
+--average A single line output of the average of all scores.
+--total A single line of output of the percentage of all compliant lines to the total number of lines in all files.
+--visual A visual representation of the style check.
+
+For each of these reports, the output format is specified by:
+--output text Display text on standard output
+--output csv Display a list of comma-separated values on standard output.
+--output gui Use a graphical user interface if possible.
+
+The visual report displays either a colorized, line by line output of
+the differences between the original source file it's exemplar (-o text),
+or displays a file-merge application for interactive use ()-o gui)
+
+Example:
+
+% longbow-style-report *.[ch]
+
+Report conformance of the .c and .h files specified as command line parameters.
+
+% longbow-style-report -
+Report conformance of the .c and .h files read from standard input, one line per file.
+
+$ longbow-style-report parc_JSON.c
+parc_JSON.c 239 0 100.00$
+$
+$ echo parc_JSON.c | longbow-style-report -
+parc_JSON.c,239,0,100.00
+$
+'''
+
+ parser = argparse.ArgumentParser(prog='longbow-style-report', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)
+ parser.add_argument('-', '--stdin', default=False, action="store_true", required=False, help="read the list of files from standard input only.")
+ parser.add_argument('-s', '--summary', default=False, action="store_true", required=False, help="Display the score for each file.")
+ parser.add_argument('-a', '--average', default=False, action="store_true", required=False, help="Display the simple average of all scores.")
+ parser.add_argument('-t', '--total', default=False, action="store_true", required=False, help="Display the percentage of all compliant lines to the total number of lines in all files.")
+ parser.add_argument('-d', '--distribution', default="[95, 90]", action="store", required=False, type=str, help="a list containing the score distributions for pretty-printing. Default '[95, 90]' (green >= 95, yellow >= 90, red < 90).")
+ parser.add_argument('-o', '--output', default="text", action="store", required=False, type=str, help="the output format: 'text', 'csv', or 'gui'.")
+ parser.add_argument('-v', '--visual', default=False, action="store_true", required=False, help="Display a visual representation of the style check.")
+ parser.add_argument('-k', '--key', default="name", action="store", required=False, type=str, help="The sort key: Type '--key help' for the list.")
+ parser.add_argument('-e', '--exclude', default="", action="store", required=False, type=str, help="Exclude a comma separated set of one or more of: 'red', 'yellow', 'green'.")
+
+ parser.add_argument("files", help="Files to check", nargs="*")
+
+ args = parser.parse_args()
+
+ if args.summary == False and args.average == False and args.total == False and args.visual == False:
+ args.summary = True
+
+ targets = []
+
+ if args.stdin:
+ for line in sys.stdin:
+ t = line.strip()
+ if len(t) > 0:
+ targets.append(t)
+ else:
+ targets = args.files
+
+ UNCRUSTIFY = "@UNCRUSTIFY_BIN@"
+ UNCRUSTIFY_CONFIG = "@UNCRUSTIFY_CONFIG@"
+
+ StyleReport.commandLineMain(args, targets, UNCRUSTIFY, UNCRUSTIFY_CONFIG)
diff --git a/longbow/src/python/longbow-test-run.py b/longbow/src/python/longbow-test-run.py
new file mode 100755
index 00000000..77ed1f98
--- /dev/null
+++ b/longbow/src/python/longbow-test-run.py
@@ -0,0 +1,172 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import os
+import sys
+import re
+import pprint
+import subprocess
+import argparse
+import json
+
+class TokenParser:
+    '''
+    A simple cursor over a list of string tokens, used to parse gcov output.
+    '''
+    def __init__(self, tokens=[]):
+        # Cursor starts at the first token.
+        self.index = 0
+        self.tokens = tokens
+
+    def nextToken(self):
+        # Return the token at the cursor and advance the cursor by one.
+        result = self.tokens[self.index]
+        self.index = self.index + 1
+        return result
+
+    def previousToken(self):
+        # NOTE(review): after the decrement this returns tokens[index - 1],
+        # i.e. the token *two* positions before the pre-call cursor — confirm
+        # this is intended; no caller is visible in this file.
+        self.index = self.index - 1
+        result = self.tokens[self.index - 1]
+        return result
+
+    def expectedToken(self, expected):
+        # Consume the next token if it equals 'expected'; otherwise back up
+        # the cursor, report the mismatch, and return False.
+        token = self.nextToken()
+        if token == expected:
+            return True
+        self.index = self.index - 1
+        print "expectedToken(%s) is not the actual %s" % (expected, token)
+        return False
+
+    def end(self):
+        # True when every token has been consumed.
+        if self.index == len(self.tokens):
+            return True
+        return False
+
+class LongBowCodeCoverage:
+ def __init__(self):
+ return
+
+ def run(self, executableFile):
+ lines = subprocess.check_output([ "gcov", "-f", executableFile ])
+ token = map(lambda x : x.strip("'"), re.split("[ :\n]+", lines))
+ return self.parse(token)
+
+ def parseFunction(self, parser):
+ functionName = parser.nextToken()
+ parser.expectedToken("Lines")
+ parser.expectedToken("executed")
+ coverage = parser.nextToken()
+ return { "function" : functionName, "coverage" : coverage }
+
+ def parseFile(self, parser):
+ fileName = parser.nextToken()
+ parser.expectedToken("Lines")
+ parser.expectedToken("executed")
+ coverage = parser.nextToken()
+ return { "file" : fileName, "coverage" : coverage }
+
+ def parse(self, tokens):
+ parser = TokenParser(tokens)
+ functions = [ ]
+
+ while not parser.end():
+ token = parser.nextToken()
+ if (token == "Function"):
+ function = self.parseFunction(parser)
+ functions.append(function)
+ elif (token == "File"):
+ file = self.parseFile(parser)
+ pass
+
+ self.detailCoverage = { "file" : file, "functions" : functions }
+ return self.detailCoverage
+
+ def getCoverage(self):
+ result["file"]["coverage"]
+
+ def getDetailCoverage(self):
+ return self.detailCoverage
+
+
+class LongBowTestRun:
+ def __init__(self, options=[]):
+ self.options = options
+ self.mainFileName = None
+ self.exitStatus = 0
+ return
+
+ def setOptions(self, options=[]):
+ self.options = options
+ return
+
+ def getMainFileName(self):
+ return self.mainFileName
+
+ def run(self, testRunner):
+ self.mainFileName = testRunner
+ self.exitStatus = 0
+
+ try:
+ try:
+ os.remove(testRunner + ".gcda")
+ except:
+ pass
+ lines = subprocess.check_output([ testRunner ])
+ lines = re.split("[ :]+", lines)
+ self.exitStatus = 0
+ except subprocess.CalledProcessError, e:
+ self.exitStatus = e.returncode
+
+ return self.exitStatus
+
+ def report(self, detailedOutput=False, jsonOutput=False):
+ result = ""
+ if self.exitStatus == 0:
+ coverage = LongBowCodeCoverage()
+ result = coverage.run(testRunner.getMainFileName())
+
+ if detailedOutput:
+ if jsonOutput:
+ result = json.dumps(result, sort_keys=False, indent=4, separators=(',', ': '))
+ else:
+ pp = str(result)
+ pass
+ else:
+ if jsonOutput:
+ result = json.dumps(result["file"], sort_keys=False, indent=4, separators=(',', ': '))
+ else:
+ result = "PASS " + result["file"]["file"] + " " + result["file"]["coverage"]
+ else:
+ result = "FAIL " + args.testRunner
+ pass
+
+ return result
+
+
+if __name__ == '__main__':
+ testRunners = []
+ if len(sys.argv) < 2:
+ print "Usage: longbow-test-run.py testExecutable"
+ print "Run a LongBow test"
+ sys.exit(1)
+
+ parser = argparse.ArgumentParser(description='Run a LongBow Test')
+ parser.add_argument("--json", help="Produce JSON output instead of a Python dictionary.", action="store_true")
+ parser.add_argument("--detailed", help="Produce detailed output.", action="store_true")
+ parser.add_argument("testRunner", help="The name of the test executable.", nargs='+')
+ args = parser.parse_args()
+
+ testRunner = LongBowTestRun([ "--run-nonforked" ])
+
+ for test in args.testRunner:
+ exitStatus = testRunner.run(test)
+ print testRunner.report(args.detailed, args.json)
+
diff --git a/longbow/src/python/longbow-test-suite.py b/longbow/src/python/longbow-test-suite.py
new file mode 100755
index 00000000..5a6d67e5
--- /dev/null
+++ b/longbow/src/python/longbow-test-suite.py
@@ -0,0 +1,55 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import sys
+from subprocess import call
+
+class LongBowTestSuite:
+    '''
+    Run a set of LongBow test executables sequentially, writing each
+    test's stdout to "<test>.log".
+    '''
+    def __init__(self):
+        self.options = []
+        return
+
+    def setOptions(self, options=[]):
+        # Extra command-line options appended to every test invocation.
+        self.options = options
+        return
+
+    def run(self, testRunners=[]):
+        # Returns the exit status of the first failing test, or 0 if all
+        # tests succeed.  Later failures do not overwrite the first one.
+        result = 0
+        for test in testRunners:
+            outputFileName = test + ".log"
+            outputFile = open(outputFileName, 'w')
+            command = [ test ] + self.options
+            print command
+            status = call(command, stdout=outputFile)
+            if result == 0:
+                result = status
+            pass
+            outputFile.close()
+            pass
+        return result
+
+
+if __name__ == '__main__':
+ testRunners = []
+ if len(sys.argv) < 2:
+ print "Usage: longbow-test-suite.py testExecutable ..."
+ print "Run one or more LongBow test runners as indpendant processes"
+ sys.exit(1)
+ testRunners = testRunners + sys.argv[1:]
+
+ testSuite = LongBowTestSuite()
+ testSuite.setOptions([ "--run-nonforked" ])
+ exitStatus = testSuite.run(testRunners)
diff --git a/longbow/src/python/longbow-vocabulary-report.py b/longbow/src/python/longbow-vocabulary-report.py
new file mode 100755
index 00000000..25004428
--- /dev/null
+++ b/longbow/src/python/longbow-vocabulary-report.py
@@ -0,0 +1,66 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import argparse
+
+sys.path.append("@INSTALL_PYTHON_DIR@")
+sys.path.append("@DEPENDENCY_PYTHON_DIR@")
+sys.path.append("../site-packages/longbow/")
+import VocabularyReport
+import hfcca
+
+def main(argv):
+    '''
+    Parse the command line and delegate to VocabularyReport.commandLineMain,
+    passing the hfcca module as the token-counting engine.
+    '''
+    desc = '''
+Print the vocabulary (number of tokens) of functions and files.
+
+The option --function displays the file name, function name,
+line number of the function, the number of tokens
+
+The default option --summary displays the file name, the average vocabulary
+of all functions in the file and a score ranging from 0 to 100.
+
+Usage:
+$ longbow-vocabulary-report parc_JSON.c
+parc_JSON.c 51.00 100.00
+$
+$ echo parc_JSON.c | longbow-vocabulary-report --function -o csv -
+vocabulary,parc_JSON.c,parcJSON_Create,49,50,100.00
+...
+$
+
+'''
+
+    # Note: a bare '-' flag ("--stdin") switches the file list to stdin.
+    parser = argparse.ArgumentParser(prog='longbow-vocabulary-report', formatter_class=argparse.RawDescriptionHelpFormatter, description=desc)
+    parser.add_argument('-s', '--summary', default=False, action="store_true", help="print the average vocabulary of each target file.")
+    parser.add_argument('-f', '--function', default=False, action="store_true", help="print the vocabulary of each function in each target file.")
+    parser.add_argument('-', '--stdin', default=False, action="store_true", required=False, help="read the list of files from standard input rather than the command line.")
+    parser.add_argument('-a', '--average', default=False, action="store_true", required=False, help="display only the simple average of the average vocabulary of each target file.")
+    parser.add_argument('-o', '--output', default="text", action="store", required=False, type=str, help="the output format: \"text\" or \"csv\"")
+    parser.add_argument("files", help="Files to check", nargs="*")
+
+    args = parser.parse_args()
+
+    VocabularyReport.commandLineMain(args, hfcca)
+
+
+if __name__ == "__main__":
+    '''
+@(#) longbow-vocabulary-report @VERSION@ @DATE@
+@(#) All Rights Reserved. Use is subject to license terms.
+    '''
+    main(sys.argv)
diff --git a/longbow/src/python/parc_uncrustify.cfg b/longbow/src/python/parc_uncrustify.cfg
new file mode 100755
index 00000000..475d8049
--- /dev/null
+++ b/longbow/src/python/parc_uncrustify.cfg
@@ -0,0 +1,115 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+indent_with_tabs = 0 # 1=indent to level only, 2=indent with tabs
+input_tab_size = 4 # original tab size
+output_tab_size = 4 # new tab size
+indent_columns = output_tab_size
+indent_label = 2 # pos: absolute col, neg: relative column
+# indent_align_string = False # align broken strings
+# indent_brace = 0
+
+nl_if_leave_one_liners = false # Don't split one-line if/else statements - 'if(a) b++;' (false/true)
+
+nl_enum_brace = remove # (ignore/add/remove/force) newline between 'enum' and '{'
+nl_union_brace = remove # "union {" vs "union \n {"
+nl_struct_brace = remove # "struct {" vs "struct \n {"
+nl_do_brace = remove # "do {" vs "do \n {"
+nl_if_brace = remove # "if () {" vs "if () \n {"
+nl_for_brace = remove # "for () {" vs "for () \n {"
+nl_else_brace = remove # "else {" vs "else \n {"
+nl_while_brace = remove # "while () {" vs "while () \n {"
+nl_switch_brace = remove # "switch () {" vs "switch () \n {"
+# nl_func_var_def_blk = 1
+# nl_before_case = 1
+nl_fcall_brace = add # "foo() {" vs "foo()\n{"
+nl_fdef_brace = add # "int foo() {" vs "int foo()\n{"
+# nl_after_return = TRUE
+nl_brace_while = remove
+nl_brace_else = remove
+nl_squeeze_ifdef = TRUE
+nl_func_type_name = add # (ignore/add/remove/force) newline between return type and function name in a function definition
+
+
+# The span for aligning struct initializer values (0=don't align)
+align_struct_init_span = 4 # number
+
+# Spaces to indent 'case' from 'switch'
+# Usually 0 or indent_columns.
+indent_switch_case = 4 # number
+
+# Make all if/elseif/else statements in a chain be braced or not. Overrides mod_full_brace_if.
+# If any must be braced, they are all braced. If all can be unbraced, then the braces are removed.
+mod_full_brace_if_chain = false # false/true
+# mod_paren_on_return = add # "return 1;" vs "return (1);"
+mod_full_brace_if = add # "if (a) a--;" vs "if (a) { a--; }"
+mod_full_brace_for = add # "for () a--;" vs "for () { a--; }"
+mod_full_brace_do = add # "do a--; while ();" vs "do { a--; } while ();"
+mod_full_brace_while = add # "while (a) a--;" vs "while (a) { a--; }"
+mod_remove_empty_return = true # Remove a void 'return;' that appears as the last statement in a function. (false/true)
+
+sp_after_ptr_star = remove # (ignore/add/remove/force) space after pointer star '*', if followed by a word.
+sp_func_proto_paren = remove # (ignore/add/remove/force) A space between function name and '(' on function declaration
+sp_return_paren = force # (ignore/add/remove/force) a space between 'return' and '('
+sp_before_semi = remove
+sp_paren_paren = remove # space between (( and ))
+sp_sizeof_paren = remove # "sizeof (int)" vs "sizeof(int)"
+sp_before_sparen = force # "if (" vs "if("
+sp_after_sparen = force # "if () {" vs "if (){"
+sp_after_cast = add # "(int) a" vs "(int)a"
+sp_inside_braces = force # "{ 1 }" vs "{1}"
+sp_inside_braces_struct = force # "{ 1 }" vs "{1}"
+sp_inside_braces_enum = force # "{ 1 }" vs "{1}"
+sp_inside_paren = remove # "( 1 )" vs "(1)"
+sp_inside_fparen = remove # "( 1 )" vs "(1)" - functions
+sp_inside_sparen = remove # "( 1 )" vs "(1)" - if/for/etc
+sp_type_func = add # ignore/add/remove/force A space between return type and function name
+sp_assign = force
+sp_arith = force
+sp_bool = force
+sp_compare = force
+sp_after_comma = force
+sp_func_def_paren = remove # "int foo (){" vs "int foo(){"
+sp_func_call_paren = remove # "foo (" vs "foo("
+sp_func_proto_paren = remove # "int foo ();" vs "int foo();"
+sp_paren_brace = add # Force a space between ')' and '{'
+sp_else_brace = add # Add or remove space between 'else' and '{' if on the same line (ignore/add/remove/force)
+sp_brace_else = force # Add or remove space between '}' and 'else' if on the same line (ignore/add/remove/force)
+
+# align_with_tabs = FALSE # use tabs to align
+# align_on_tabstop = FALSE # align on tabstops
+# align_enum_equ_span = 4
+# align_nl_cont = TRUE
+# align_var_def_span = 2
+# align_var_def_inline = TRUE
+# align_var_def_star = TRUE
+# align_var_def_colon = TRUE
+# align_assign_span = 1
+# align_struct_init_span = 3
+# align_var_struct_span = 3
+# align_right_cmt_span = 3
+# align_pp_define_span = 3
+# align_pp_define_gap = 4
+# align_number_left = TRUE
+# align_typedef_span = 5
+# align_typedef_gap = 3
+
+cmt_star_cont = TRUE # put a star on subsequent comment lines
+
+eat_blanks_before_close_brace = TRUE
+eat_blanks_after_open_brace = TRUE
+
+# Add or remove space between pointer stars '*'
+sp_between_ptr_star = remove
diff --git a/longbow/src/python/site-packages/CMakeLists.txt b/longbow/src/python/site-packages/CMakeLists.txt
new file mode 100644
index 00000000..fab750f7
--- /dev/null
+++ b/longbow/src/python/site-packages/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Install the longbow.pth path-configuration file into the base Python
+# directory, and the LongBow Python modules into the package directory.
+install(FILES longbow.pth DESTINATION ${INSTALL_BASE_PYTHON_DIR})
+install(FILES longbow/LongBow.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/FileUtil.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/GCov.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/GCovSummary.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/ANSITerm.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/SymbolTable.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/Language_C.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/StyleReport.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/CoverageReport.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/VocabularyReport.py DESTINATION ${INSTALL_PYTHON_DIR})
+install(FILES longbow/NameReport.py DESTINATION ${INSTALL_PYTHON_DIR})
diff --git a/longbow/src/python/site-packages/longbow.pth b/longbow/src/python/site-packages/longbow.pth
new file mode 100755
index 00000000..9f0d4f64
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow.pth
@@ -0,0 +1,18 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+#
+# longbow package configuration
+longbow
diff --git a/longbow/src/python/site-packages/longbow/.gitignore b/longbow/src/python/site-packages/longbow/.gitignore
new file mode 100644
index 00000000..0d20b648
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/longbow/src/python/site-packages/longbow/ANSITerm.py b/longbow/src/python/site-packages/longbow/ANSITerm.py
new file mode 100755
index 00000000..8594c949
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/ANSITerm.py
@@ -0,0 +1,62 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import os
+import subprocess
+import re
+import sys
+import pprint
+
+ansiRed = "\x1b[31m";
+ansiGreen = "\x1b[32m";
+ansiYellow = "\x1b[33m";
+ansiBlue = "\x1b[34m";
+ansiMagenta = "\x1b[35m";
+ansiCyan = "\x1b[36m";
+ansiReset = "\x1b[0m";
+
+def colorize(color, chars):
+
+ result = chars
+ if color == "red":
+ result = ansiRed + chars + ansiReset
+ elif color == "green":
+ result = ansiGreen + chars + ansiReset
+ elif color == "yellow":
+ result = ansiYellow + chars + ansiReset
+ elif color == "blue":
+ result = ansiBlue + chars + ansiReset
+ elif color == "magenta":
+ result = ansiMagenta + chars + ansiReset
+ elif color == "cyan":
+ result = ansiCyan + chars + ansiReset
+ else:
+ print >> sys.stderr, "Bad color name:", color
+
+ return result
+
+
+def printColorized(color, string):
+    # Print string wrapped in the ANSI codes for the named color.
+    print colorize(color, string)
+    return
+
+
+class ANSITerm:
+    '''
+    Thin object wrapper around the module-level colorize helper.
+    '''
+    def __init__(self):
+        return
+
+    def printColorized(self, color, string):
+        # Print string wrapped in the ANSI codes for the named color.
+        print colorize(color, string)
diff --git a/longbow/src/python/site-packages/longbow/CoverageReport.py b/longbow/src/python/site-packages/longbow/CoverageReport.py
new file mode 100755
index 00000000..c18ae056
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/CoverageReport.py
@@ -0,0 +1,262 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import sys
+import os
+import re
+import subprocess
+import difflib
+import csv
+# import argparse
+import pprint
+# sys.path.append("${INSTALL_PYTHON_DIR}")
+# sys.path.append("${DEPENDENCY_PYTHON_DIR}")
+# sys.path.append("../site-packages/longbow/")
+import LongBow
+import GCov
+import GCovSummary
+import FileUtil
+import ANSITerm
+import Language_C
+
+def checkTestExecutable(executableFileName):
+ result = False
+
+ if not os.path.exists(executableFileName):
+ return result
+
+ path = os.path.dirname(executableFileName)
+ pattern = os.path.basename(executableFileName)+'*.gcda'
+ if not Language_C.findFiles(path, pattern):
+ return result
+
+ pattern = os.path.basename(executableFileName)+'*.gcno'
+ if not Language_C.findFiles(path, pattern):
+ return result
+
+ result = True
+ return result
+
+def findTestExecutable(fileName, hints=[]):
+    '''
+Given a file name, look in the canonical places for a corresponding LongBow test file.
+Candidates from the hint directories are tried first, then the file's own
+directory and its "test" subdirectory.  Returns the absolute path of the
+first candidate with usable coverage data, or None.
+    '''
+    directoryName = os.path.dirname(fileName)
+    if len(directoryName) == 0:
+        directoryName = "."
+
+    file = Language_C.Module(fileName)
+
+    possibleTestFiles = list()
+    for hint in hints:
+        possibleTestFiles.append(hint + "/" + file.getExecutableName())
+        possibleTestFiles.append(hint + "/" + file.getTestExecutableName())
+    possibleTestFiles.append(directoryName + "/" + file.getExecutableName())
+    possibleTestFiles.append(directoryName + "/" + file.getTestExecutableName())
+    possibleTestFiles.append(directoryName + "/test/" + file.getTestExecutableName())
+
+    result = None
+    for possibleTestFile in possibleTestFiles:
+        # checkTestExecutable requires the executable plus .gcda/.gcno files.
+        if checkTestExecutable(possibleTestFile) == True:
+            result = os.path.abspath(possibleTestFile)
+            break
+
+    return result
+
+
+def textSummary(args, filesAndTests, gCovResults, prefix=""):
+    '''
+    Print a color-coded per-file coverage table, aligned on the longest
+    file path.  Indirectly-covered files are shown in magenta.
+    '''
+    summary = GCov.computeSummary(filesAndTests, gCovResults)
+
+    if not args.includeTestSources:
+        summary = GCovSummary.removeTestSourceFiles(summary)
+
+    if len(summary) == 0:
+        return
+
+    if args.explain:
+        pp = pprint.PrettyPrinter(indent=2, width=150)
+        pp.pprint(summary)
+
+    maximumFileLength = max(map(lambda entry: len(entry), summary))
+
+    format = "%s%-" + str(maximumFileLength) + "s %6s"
+    print format % (prefix, "File Path", "Score")
+
+    format = "%s%-" + str(maximumFileLength) + "s %6.2f"
+    for testedFile in sorted(summary.keys()):
+        string = format % (prefix, testedFile, summary[testedFile]["coverage"])
+        if summary[testedFile]["direct"] == "indirect":
+            ANSITerm.printColorized("magenta", string)
+        else:
+            # NOTE(review): eval() of the user-supplied --distribution string;
+            # consider ast.literal_eval for untrusted command lines.
+            LongBow.scorePrinter(eval(args.distribution), summary[testedFile]["coverage"], string)
+
+    return
+
+
+def textAverage(args, filesAndTests, gcovResults):
+    '''
+    Print the color-coded average coverage over all (non-test) files
+    and return the score.
+    '''
+    summary = GCov.computeSummary(filesAndTests, gcovResults)
+
+    if not args.includeTestSources:
+        summary = GCovSummary.removeTestSourceFiles(summary)
+
+    score = GCovSummary.averageCoverage(summary)
+
+    # NOTE(review): eval() of the user-supplied --distribution string.
+    LongBow.scorePrinter(eval(args.distribution), score, "%.2f" % (score))
+    return score
+
+
+def csvSummary(args, filesAndTests, gCovResults):
+    '''
+    Print one "path,coverage" line per (non-test) file, color-coded by score.
+    '''
+    summary = GCov.computeSummary(filesAndTests, gCovResults)
+
+    if not args.includeTestSources:
+        summary = GCovSummary.removeTestSourceFiles(summary)
+
+    if len(summary) > 0:
+        for testedFile in sorted(summary.keys()):
+            outputString = "%s,%.2f" % (testedFile, summary[testedFile]["coverage"])
+            # NOTE(review): eval() of the user-supplied --distribution string.
+            LongBow.scorePrinter(eval(args.distribution), summary[testedFile]["coverage"], outputString)
+
+    return
+
+
+def csvAverage(args, filesAndTests, gcovResults):
+    '''
+    Print the color-coded average coverage over all (non-test) files.
+    Unlike textAverage, this returns None rather than the score.
+    '''
+    summary = GCov.computeSummary(filesAndTests, gcovResults)
+
+    if not args.includeTestSources:
+        summary = GCovSummary.removeTestSourceFiles(summary)
+
+    score = GCovSummary.averageCoverage(summary)
+
+    LongBow.scorePrinter(eval(args.distribution), score, "%.2f" % (score))
+    return
+
+
+def textVisualDisplayGcovLine(line):
+ token = line.split(":", 2)
+ if len(token) == 3:
+ if token[0] == "#####":
+ print ANSITerm.colorize("red", token[1] + " " + token[2])
+ elif token[0] == "$$$$$":
+ print ANSITerm.colorize("yellow", token[1] + " " + token[2])
+ else:
+ print ANSITerm.colorize("green", token[1] + " " + token[2])
+
+ return
+
+
+def textVisual(args, filesAndTests, gcovResults):
+    '''
+    Print each target file's gcov-annotated source, colorized line by line.
+    Files with no coverage information are reported on stderr.
+    '''
+    summary = GCov.computeSummary(filesAndTests, gcovResults)
+    if args.explain:
+        pp = pprint.PrettyPrinter(indent=2, width=150)
+        pp.pprint(summary)
+        pp.pprint(filesAndTests)
+
+    for entry in filesAndTests:
+        print entry[0]
+        try:
+            gcovLines = summary[entry[0]]["gcovLines"]
+            # map() used for its side effect of printing each line.
+            map(lambda line: textVisualDisplayGcovLine(line.strip()), gcovLines)
+        except KeyError:
+            print >> sys.stderr, "No coverage information for", entry[0]
+
+    return
+
+
+def displaySummary(args, filesAndTests, newGCovResults):
+ if args.output == "text":
+ textSummary(args, filesAndTests, newGCovResults)
+ elif args.output == "csv":
+ csvSummary(args, filesAndTests, newGCovResults)
+ else:
+ print >> sys.stderr, "Unsupported output type"
+ return
+
+
+def displayAverage(args, filesAndTests, gcovResults):
+ if args.output == "text":
+ textAverage(args, filesAndTests, gcovResults)
+ elif args.output == "csv":
+ csvAverage(args, filesAndTests, gcovResults)
+ else:
+ print >> sys.stderr, "Unsupported output type"
+ return
+
+
+def explain(args, filesAndTests, gcovResults):
+
+ pp = pprint.PrettyPrinter(indent=2, width=150)
+ pp.pprint(gcovResults)
+
+ return
+
+def getFilesAndTests(fileNames, testDirs=[]):
+ namesAndPaths = map(lambda fileName: [fileName, os.path.abspath(fileName)], fileNames)
+ filesAndTests = map(lambda nameAndPath: [ nameAndPath[0], findTestExecutable(nameAndPath[1], testDirs) ], namesAndPaths)
+ return filesAndTests
+
+
+def gradeAndPrint(targets, testDirs=[], problemsOnly=False, prefix=""):
+    '''
+    Print a color-coded coverage table for the target files.
+    With problemsOnly, list only files below 100% coverage.
+    Returns False when no gcov results exist, True otherwise.
+    '''
+    filesAndTests = getFilesAndTests(targets, testDirs)
+    newGCovResults = map(lambda fileAndTestFile: GCov.getCoverage(fileAndTestFile[1]), filesAndTests)
+
+    summarys = GCov.computeSummary(filesAndTests, newGCovResults)
+    if len(summarys) < 1:
+        print "%sNo GCov Results - Please be sure to run 'make check' first" % prefix
+        return False
+    summarys = GCovSummary.removeTestSourceFiles(summarys)
+
+    paths = summarys.keys()
+    if problemsOnly:
+        paths = filter(lambda key: summarys[key]["coverage"] < 100, paths)
+
+    # Fixed thresholds: green >= 99, yellow >= 90, red below.
+    distribution=[99,90]
+    maximumFileLength = max(map(lambda entry: len(os.path.relpath(entry)), paths))
+    format = "%s%-" + str(maximumFileLength) + "s %6s"
+    print format % (prefix, "File Path", "Score")
+    format = "%s%-" + str(maximumFileLength) + "s %6.2f"
+    for path in sorted(paths):
+        string = format % (prefix, os.path.relpath(path), summarys[path]["coverage"])
+        LongBow.scorePrinter(distribution, summarys[path]["coverage"], string)
+
+    return True
+
+def commandLineMain(args, fileNames, testDir=""):
+    '''
+    Entry point for the coverage-report command line: locate each file's
+    test executable, gather gcov results, and dispatch to the display
+    selected by args (--summary, --average, --visual, or --explain).
+    Always returns True.
+    '''
+    testDirs = []
+    if testDir:
+        testDirs.append(testDir)
+    fileNames = map(lambda fileName: os.path.abspath(fileName), fileNames)
+    filesAndTests = map(lambda fileName: [fileName, findTestExecutable(fileName, testDirs)], fileNames)
+
+    # Warn about files with no test executable; they still flow into
+    # GCov.getCoverage below with a None test path.
+    filesWithNoTest = filter(lambda fileAndTest: fileAndTest[1] == None, filesAndTests)
+    if len(filesWithNoTest) != 0:
+        outputFormat = "%s has no corresponding test executable or coverage data.\n"
+        map(lambda filesAndTests: sys.stderr.write(outputFormat % (filesAndTests[0])), filesWithNoTest)
+
+    gCovResults = map(lambda fileAndTestFile: GCov.getCoverage(fileAndTestFile[1]), filesAndTests)
+
+    if args.summary is True:
+        displaySummary(args, filesAndTests, gCovResults)
+    elif args.average is True:
+        displayAverage(args, filesAndTests, gCovResults)
+    elif args.visual is True:
+        textVisual(args, filesAndTests, gCovResults)
+    elif args.explain is True:
+        explain(args, filesAndTests, gCovResults)
+
+    return True
diff --git a/longbow/src/python/site-packages/longbow/DoxygenReport.py b/longbow/src/python/site-packages/longbow/DoxygenReport.py
new file mode 100755
index 00000000..46edf047
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/DoxygenReport.py
@@ -0,0 +1,161 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import os
+import pprint
+import subprocess
+import difflib
+import csv
+import LongBow
+
+def concatenateContinuationLines(lines):
+    '''
+    Parse doxygen log lines.
+    Lines that are indented by a space are continuations of the previous line;
+    they are joined onto that line separated by a single space.
+    '''
+    result = list()
+    accumulator = ""
+    for line in lines:
+        line = line.rstrip()
+        # NOTE(review): both operands of this 'and' test the same " " prefix;
+        # one was presumably meant to be a tab — confirm against real logs.
+        if line.startswith(" ") == False and line.startswith(" ") == False:
+            if len(accumulator) > 0:
+                result.append(accumulator)
+            accumulator = line
+        else:
+            accumulator = accumulator + " " + line.lstrip()
+
+    # Flush the final accumulated line (may be "" for empty input).
+    result.append(accumulator)
+
+    return result
+
+def parseLine(line):
+ result = None
+ if not line.startswith("<"):
+ fields = line.split(":")
+ if len(fields) >= 4:
+ result = { "fileName" : fields[0].strip(),
+ "lineNumber" : int(fields[1].strip()),
+ "type" : "documentation",
+ "severity" : fields[2].strip(),
+ "message" : " ".join(fields[3:]).strip()}
+ elif line.startswith("error"):
+ print line
+ elif len(line) > 0:
+ print "Consider using doxygen -s:", line
+
+ return result
+
+def canonicalize(lines):
+ lines = concatenateContinuationLines(lines)
+ parsedLines = map(lambda line: parseLine(line), lines)
+ parsedLines = filter(lambda line: line != None, parsedLines)
+ return parsedLines
+
+def organize(entries):
+    '''
+    Group parsed log entries into a dict keyed by file name; each value is
+    a dict keyed by line number (as a string) holding the unique entries
+    reported for that line.
+    '''
+    result = dict()
+
+    for entry in entries:
+        if not entry["fileName"] in result:
+            result[entry["fileName"]] = dict()
+
+        entryByFile = result[entry["fileName"]]
+
+        if not str(entry["lineNumber"]) in entryByFile:
+            entryByFile[str(entry["lineNumber"])] = list()
+        # Deduplicate identical entries for the same line.
+        if not entry in entryByFile[str(entry["lineNumber"])]:
+            entryByFile[str(entry["lineNumber"])].append(entry)
+
+    return result
+
+def textualSummary(distribution, documentation):
+    '''
+    Print one color-coded line per file: path, total lines, lines with
+    documentation issues, and the resulting percentage score.
+    '''
+    maxWidth = 0
+    for entry in documentation:
+        if len(entry) > maxWidth:
+            maxWidth = len(entry)
+
+    formatString ="%-" + str(maxWidth) + "s %8d %8d %.2f%%"
+    for entry in documentation:
+        badLines = len(documentation[entry])
+        # NOTE(review): divides by LongBow.countLines(entry); a zero-length
+        # file would raise ZeroDivisionError — confirm that cannot occur.
+        totalLines = LongBow.countLines(entry)
+        score = float(totalLines - badLines) / float(totalLines) * 100.0
+        LongBow.scorePrinter(distribution, score, formatString % (entry, totalLines, badLines, score))
+    return
+
+def textualAverage(distribution, documentation, format):
+    '''
+    Print the color-coded average documentation score over all files,
+    formatted with the given printf-style format.  Scores 100.0 when
+    there are no files.
+    '''
+    sum = 0.0
+
+    for entry in documentation:
+        badLines = len(documentation[entry])
+        totalLines = LongBow.countLines(entry)
+        score = float(totalLines - badLines) / float(totalLines) * 100.0
+        sum = sum + score
+
+    if len(documentation) == 0:
+        averageScore = 100.0
+    else:
+        averageScore = sum / float(len(documentation))
+
+    LongBow.scorePrinter(distribution, averageScore, format % averageScore)
+
+def csvSummary(distribution, documentation):
+    '''
+    Print one color-coded CSV record per file:
+    "documentation,<path>,<totalLines>,<badLines>,<score>%".
+    '''
+    formatString ="documentation,%s,%d,%d,%.2f%%"
+    for entry in documentation:
+        badLines = len(documentation[entry])
+        totalLines = LongBow.countLines(entry)
+        score = float(totalLines - badLines) / float(totalLines) * 100.0
+        LongBow.scorePrinter(distribution, score, formatString % (entry, totalLines, badLines, score))
+    return
+
+
+def gradeAndPrint(targets, doxLogfile, problemsOnly=False, prefix=""):
+ with open(doxLogfile, 'r') as f:
+ lines = f.readlines()
+
+ lines = canonicalize(lines)
+
+ result = organize(lines)
+
+ pp = pprint.PretyPrinter(intent=len(prefix))
+
+ distribution=[100, 95]
+ textualSummary(distribution, result)
+ return True
+
+def commandLineMain(args, fileNames):
+    '''
+    Entry point for the doxygen-report command line: parse the doxygen log
+    named by args.doxygenlog and print a summary and/or average in the
+    requested output format.  Defaults to --summary when neither --summary
+    nor --average was given.
+    '''
+    if not args.summary and not args.average:
+        args.summary = True
+
+    with open(args.doxygenlog, 'r') as f:
+        lines = f.readlines()
+
+    lines = canonicalize(lines)
+
+    result = organize(lines)
+
+    pp = pprint.PrettyPrinter(indent=4)
+    #pp.pprint(result)
+
+    # NOTE(review): eval() of the user-supplied --distribution string;
+    # consider ast.literal_eval for untrusted command lines.
+    distribution = eval(args.distribution)
+    if args.summary:
+        if args.output == "text":
+            textualSummary(distribution, result)
+        else:
+            csvSummary(distribution, result)
+
+    if args.average:
+        textualAverage(distribution, result, "%.2f")
diff --git a/longbow/src/python/site-packages/longbow/FileUtil.py b/longbow/src/python/site-packages/longbow/FileUtil.py
new file mode 100755
index 00000000..ae3113f6
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/FileUtil.py
@@ -0,0 +1,102 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import os
+import csv
+import subprocess
+
def readFileLines(fileName):
    '''
    Read the named file and return its contents as a list of lines,
    each retaining its trailing newline.
    '''
    with open(fileName, "r") as f:
        return f.readlines()
+
def readFileString(fileName):
    '''
    Read the named file and return its entire contents as a single string.
    Returns None when fileName is None or empty.
    '''
    if fileName is None or len(fileName) == 0:
        return None

    with open(fileName, "r") as f:
        return f.read()
+
def sourceFileNameToName(sourceFileName):
    '''
    Given the path to a source file, return the bare module name: the
    basename with everything from the first '.' onward removed.
    '''
    baseName = os.path.basename(sourceFileName)
    return baseName.partition(".")[0]
+
def canonicalizeFunctionName(functionName):
    '''
    Strip a single leading underscore (as prepended by nm for C symbols),
    returning the canonical name suitable for a source file.
    '''
    return functionName[1:] if functionName[0] == "_" else functionName
+
def isReservedName(functionName):
    '''
    Given a canonicalized name, determine if it is a reserved name according
    to ISO/IEC 9899:2011 and ANSI Sec. 4.1.2.1: identifiers that begin with
    an underscore followed by either an uppercase letter or another
    underscore are always reserved for any use.
    '''
    # Fixed: names shorter than two characters (e.g. "_") previously raised
    # IndexError when indexing functionName[1].
    if len(functionName) < 2:
        return False
    if functionName[0] == '_' and functionName[1] == '_':
        return True
    elif functionName[0] == '_' and functionName[1].isupper():
        return True
    return False
+
def getDarwinTestableFunctions(objectFileName):
    '''
    Retrieve a set of local and global function names within a file.

    Runs Darwin's nm(1) on the given object file and parses its output,
    returning {"Local": [...], "Global": [...]} of sorted, canonicalized
    (leading underscore stripped), non-reserved function names.
    '''
    # -g: external symbols, -U: skip undefined symbols, -m: verbose Mach-O
    # output.  NOTE(review): with -g only external symbols are listed, yet
    # the loop below also collects "internal" names -- confirm the flag set.
    command = [ "/usr/bin/nm", "-gUm", objectFileName ]

    output = subprocess.check_output(command)
    lines = output.splitlines()

    external = []
    internal = []
    for line in lines:
        if line:
            # Expected -m record shape:
            # "<address> (__TEXT,__text) <scope> <name>" -- presumably
            # single-space separated; TODO confirm against real nm output.
            fields = line.split(" ")
            if (len(fields) > 1) and (fields[1] == "(__TEXT,__text)"):
                functionName = canonicalizeFunctionName(fields[3])

                if not isReservedName(functionName):
                    if fields[2] == "external":
                        external.append( ( functionName ) )
                    else:
                        internal.append( ( functionName ) )
                    pass
                pass
            pass
        pass

    external.sort()
    internal.sort()
    return { "Local": internal, "Global" : external }
diff --git a/longbow/src/python/site-packages/longbow/GCov.py b/longbow/src/python/site-packages/longbow/GCov.py
new file mode 100755
index 00000000..c2705fda
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/GCov.py
@@ -0,0 +1,232 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import os
+import subprocess
+import re
+import sys
+import pprint
+import FileUtil
+import Language_C
+
class GCov:
    '''Placeholder class; instances carry no state or behavior.'''

    def __init__(self):
        pass
+
def canonicalizeLines(lines):
    '''
    Join gcov's wrapped output back into whole records.  A blank line or a
    line containing "creating" terminates the record being accumulated;
    "creating" lines are kept as records of their own.  Any text still
    pending at end of input is discarded (matching the original behavior).
    '''
    records = []
    pending = ""
    for rawLine in lines:
        text = rawLine.strip()
        if len(text) == 0 or "creating" in text:
            if len(pending.strip()) > 0:
                records.append(pending.strip())
            pending = ""
            if len(text) > 0:
                records.append(text)
        else:
            pending = pending + " " + text
    return records
+
def executeGCovCommand(testExecutableFileName):
    '''
    Run gcov for the given test executable and return its canonicalized
    output lines, or None when a required .o/.gcda/.gcno file is missing.

    NOTE(review): the '-o=', '-gcda=' and '-gcno=' option spellings match
    llvm-cov gcov rather than GNU gcov -- confirm the intended toolchain.
    '''
    currentDirectory = os.getcwd()
    targetDirectory = os.path.dirname(os.path.abspath(testExecutableFileName))
    testExecutableBaseName = os.path.basename(testExecutableFileName)

    os.chdir(targetDirectory)
    try:
        # Fixed: the early returns below previously left the process chdir'd
        # into targetDirectory; the finally clause now always restores cwd.
        objects = Language_C.findFiles("./", testExecutableBaseName + "*.o")
        if not objects:
            return
        objdir = os.path.dirname(objects[0])
        gcdas = Language_C.findFiles("./", testExecutableBaseName + "*.gcda")
        if not gcdas:
            return
        gcda = gcdas[0]
        gcnos = Language_C.findFiles("./", testExecutableBaseName + "*.gcno")
        if not gcnos:
            return
        gcno = gcnos[0]
        proc = subprocess.Popen(['gcov', '-af', '-o='+objdir, '-gcda='+gcda, '-gcno='+gcno, testExecutableBaseName],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    finally:
        os.chdir(currentDirectory)

    inputLines = map(lambda line: line.strip(), proc.stdout)

    return canonicalizeLines(inputLines)
+
def parseFunctionLine(line):
    '''
    Parse one gcov per-function record, e.g.
        Function 'TestFixture_Global_TearDown' Lines executed:71.43% of 7
    Returns { name : { "coverage" : float, "numberOfLines" : int } },
    or an empty list when the line does not match.
    '''
    match = re.search("Function '(.*)' Lines executed:(.*)% of (.*)", line, re.IGNORECASE)
    if match is None:
        return []

    functionName = match.group(1)
    return { functionName : { "coverage" : float(match.group(2)), "numberOfLines" : int(match.group(3)) } }
+
def parseFileLine(testExecutableDirectoryName, line):
    '''
    Parse one gcov per-file record, e.g.
        File './../parc_Buffer.c' Lines executed:92.69% of 424
    The file name is resolved to an absolute path under the test
    executable's directory.  Returns
    { fileName : { "coverage" : float, "totalLines" : int } },
    or an empty dict when the line does not match.
    '''
    match = re.search("File '(.*)' Lines executed:(.*)% of (.*)", line, re.IGNORECASE)
    if match is None:
        return { }

    baseName = os.path.basename(match.group(1))
    fileName = os.path.abspath(testExecutableDirectoryName + "/" + baseName)
    return { fileName : { "coverage" : float(match.group(2)), "totalLines" : int(match.group(3)) } }
+
def parseCreatingLine(testExecutableDirectoryName, line):
    '''
    Parse one gcov "creating" record, e.g.
        parc_Buffer.c:creating 'parc_Buffer.c.gcov'
    Both names are resolved to absolute paths under the test executable's
    directory, and the created .gcov file is read into "gcovLines".
    Returns None when the line does not match.
    '''
    match = re.search("(.*):creating '(.*)'", line, re.IGNORECASE)
    if match is None:
        return None

    fileName = os.path.abspath(testExecutableDirectoryName + "/" + os.path.basename(match.group(1)))
    gcovFileName = os.path.abspath(testExecutableDirectoryName + "/" + os.path.basename(match.group(2)))

    return { "fileName" : fileName, "gcovFileName" : gcovFileName, "gcovLines" : FileUtil.readFileLines(gcovFileName) }
+
+
def computeCoverageFromGCovLines(testExecutableDirectoryName, testExecutableFileName, lines):
    '''
    Fold canonicalized gcov output lines into a coverage dictionary:

    { testExecutableFileName : { "testedFunctions" : {...}, "testedFiles" : {...} } }

    "testedFunctions" maps a function name to its coverage percentage and
    line count.  "testedFiles" maps an absolute file name to its coverage
    percentage and line count, augmented with the "gcovFileName" and
    "gcovLines" taken from the corresponding "creating" record.
    '''
    testedFiles = { }
    testedFunctions = { }
    for line in lines:
        if line.startswith("Function"):
            testedFunctions.update(parseFunctionLine(line))
        elif line.startswith("File"):
            testedFiles.update(parseFileLine(testExecutableDirectoryName, line))
        else:
            creating = parseCreatingLine(testExecutableDirectoryName, line)
            if creating is not None:
                # Attach the .gcov details to the already-seen "File" record.
                fileName = creating.pop("fileName")
                testedFiles[fileName].update(creating)

    return { testExecutableFileName : { "testedFunctions" : testedFunctions, "testedFiles" : testedFiles } }
+
+
def noCoverage():
    '''Return an empty coverage result: no files and no functions tested.'''
    return { "testedFiles" : { }, "testedFunctions" : { } }
+
def getCoverage(testExecutableFileName):
    '''
    Run gcov for the named test executable and return the resulting
    coverage dictionary (see computeCoverageFromGCovLines).
    Returns None when given None.
    '''
    if testExecutableFileName is None:
        return None

    absoluteName = os.path.abspath(testExecutableFileName)
    directoryName = os.path.dirname(absoluteName)
    gcovLines = executeGCovCommand(absoluteName)

    return computeCoverageFromGCovLines(directoryName, absoluteName, gcovLines)
+
+
def selectGreaterCoverage(testedFileA, testedFileB):
    '''Return whichever record has the higher "coverage"; ties favor A.'''
    if testedFileA["coverage"] >= testedFileB["coverage"]:
        return testedFileA
    return testedFileB
+
def computeSummary(filesAndTests, newGCovResults):
    '''
    First, for each target file named in the gcov results, find the corresponding testedFile and report the maximum coverage.

    If the target file is not in any of the testedFiles

    { targetFileName : { "coverage": percent, "veracity" : "direct" / "indirect" } }
    '''
    # NOTE(review): despite the docstring's "veracity" key, the code stores
    # the direct/indirect marker under the key "direct".  `filesAndTests`
    # is unused here -- confirm both are intentional.

    # Drop entries for which no gcov data was produced.
    newGCovResults = filter(lambda entry: entry != None, newGCovResults)

    result = dict()
    for entry in newGCovResults:
        for testExecutableName in entry:
            testExecutableCSourceName = Language_C.Module(testExecutableName).getCSourceName()

            for testedFileName in entry[testExecutableName]["testedFiles"]:
                testedFile = entry[testExecutableName]["testedFiles"][testedFileName]

                # Coverage of a file by its own test_<name> executable is
                # "direct" and always wins.
                if Language_C.Module(testedFileName).getTestExecutableName() == os.path.basename(testExecutableName):
                    result[testedFileName] = testedFile
                    result[testedFileName]["direct"] = "direct"
                elif testedFileName in result:
                    # Otherwise keep whichever observation reports the
                    # greater coverage; any replacement is "indirect".
                    bestCoverage = selectGreaterCoverage(testedFile, result[testedFileName])
                    if result[testedFileName] != bestCoverage:
                        result[testedFileName] = bestCoverage
                        result[testedFileName]["direct"] = "indirect"
                else:
                    result[testedFileName] = testedFile
                    result[testedFileName]["direct"] = "indirect"

    return result
+
def computeAverage(filesAndTests, gcovResults):
    '''
    Compute the average coverage percentage over all non-test source files
    in the summarized gcov results.  Returns 0.0 when there is nothing
    to average.
    '''
    # Fixed: previously called the undefined names computeSuperSummary()
    # and removeTestSourceFiles(), raising NameError at runtime.  Use the
    # local computeSummary() and filter out test sources via Language_C.
    summary = computeSummary(filesAndTests, gcovResults)

    filesToAverage = [entry for entry in summary
                      if not Language_C.Module(entry).isTestSourceName()]

    score = 0.0

    if len(filesToAverage) > 0:
        # sum() is used instead of reduce(); reduce is not a builtin in
        # Python 3 and sum is equivalent here.
        total = sum(summary[entry]["coverage"] for entry in filesToAverage)
        score = total / float(len(filesToAverage))

    return score
+
+
if __name__ == '__main__':
    # Ad-hoc driver: either run gcov on a hardcoded test executable, or
    # (flip the constant) parse pre-captured gcov output from stdin.
    pp = pprint.PrettyPrinter(indent=4, width=132)
    if True:
        gcovResult = getCoverage("/Users/gscott/Documents/workspace/Distillery/Libparc/parc/algol/test/test_parc_JSON")
    else:
        lines = sys.stdin.readlines()
        lines = canonicalizeLines(lines)
        pp.pprint(lines)

        # Fixed: computeCoverageFromGCovLines takes three arguments; the
        # executable name was previously omitted (TypeError).
        # TODO confirm the intended executable name for this branch.
        testDirectory = "/Users/gscott/Documents/workspace/Distillery/Libparc/parc/algol/test/"
        gcovResult = computeCoverageFromGCovLines(testDirectory, testDirectory + "test_parc_JSON", lines)

    pp.pprint(gcovResult)
diff --git a/longbow/src/python/site-packages/longbow/GCovSummary.py b/longbow/src/python/site-packages/longbow/GCovSummary.py
new file mode 100755
index 00000000..bfa6710a
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/GCovSummary.py
@@ -0,0 +1,42 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import os
+import subprocess
+import re
+import sys
+import pprint
+import FileUtil
+import Language_C
+
+
def removeTestSourceFiles(gcovSummary):
    '''
    Delete, in place, every entry whose key names a test source file
    (test_*.c) from the summary, and return the summary.
    '''
    testEntries = [entry for entry in gcovSummary if Language_C.Module(entry).isTestSourceName()]

    for entry in testEntries:
        del gcovSummary[entry]

    return gcovSummary
+
+
def averageCoverage(summary):
    '''
    Return the mean "coverage" value across all summary entries,
    or 0.0 for an empty summary.
    '''
    score = 0.0

    if len(summary) > 0:
        # Fixed for forward-compatibility: reduce() is not a builtin in
        # Python 3; the sum() builtin is equivalent here.
        total = sum(summary[entry]["coverage"] for entry in summary)
        score = total / float(len(summary))

    return score
diff --git a/longbow/src/python/site-packages/longbow/Language_C.py b/longbow/src/python/site-packages/longbow/Language_C.py
new file mode 100755
index 00000000..85183133
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/Language_C.py
@@ -0,0 +1,202 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import os
+import glob
+
+import fnmatch
+import subprocess
+
def findFiles(startDir, pattern):
    '''
    Walk startDir recursively and return the paths of all files whose
    basename matches the given fnmatch-style pattern.
    '''
    found = []
    for directory, _subdirs, names in os.walk(startDir):
        found.extend(os.path.join(directory, name) for name in fnmatch.filter(names, pattern))
    return found
+
def getLibPathForObject(libraryPath, filename):
    '''
    Scan a static library's table of contents (ar -t) for a member whose
    name before the first '.' equals filename.  Returns a path of the form
    "lib.a(member.o)" suitable for nm, or '' when no member matches.
    '''
    result = ''

    command = ['/usr/bin/ar', '-t', libraryPath]
    members = subprocess.check_output(command).splitlines()

    for member in members:
        if member.split('.')[0] == filename:
            result = libraryPath + '(' + member + ')'
            break

    return result
+
class Module:
    '''Represent a C language module.

    A module consists of the file names of the C source, C header file,
    object file, and an executable file.
    '''
    # Fixed: the class docstring above was previously unterminated (the
    # closing quotes were missing), which swallowed __init__ into the string
    # literal and left the module broken.

    def __init__(self, srcPath, objectDirs=[]):
        '''
        srcPath: path to a source or derived file; the module name is its
        basename up to the first '.', with any leading "test_" stripped.
        objectDirs: directories or .a archives searched for a matching
        object file; defaults to the source file's own directory.
        '''
        self.path = self.initialzePath(srcPath)
        if not objectDirs:
            objectDirs = [self.path]
        split = srcPath.split("/")
        self.originalName = split[len(split) - 1]
        self.originalBaseName = os.path.basename(srcPath)
        tokens = self.originalBaseName.split('.')
        self.fileName = tokens[0].replace("/", "")

        if self.fileName.startswith("test_"):
            self.fileName = self.fileName[5:]

        # Search for an appropriate object file.
        self.objectPath = ""
        for objectDir in objectDirs:
            if objectDir.endswith(".a"):
                self.objectPath = getLibPathForObject(objectDir, self.fileName)
                if self.objectPath:
                    break
            else:
                objectSearchPath = os.path.join(objectDir, self.fileName) + "*.o*"
                ofiles = glob.glob(objectSearchPath)
                if ofiles:
                    # If we've found some matches, assume we want the first.
                    self.objectPath = ofiles[0]
                    break
        return

    def isTestExecutableName(self):
        # True when srcPath named this module's test executable ("test_<name>").
        return self.getTestExecutableName() == self.originalBaseName

    def isTestSourceName(self):
        # True when srcPath named this module's test source ("test_<name>.c").
        return self.getTestSourceName() == self.originalBaseName

    def isCSourceName(self):
        return self.getCSourceName() == self.originalBaseName

    def isCHeaderName(self):
        return self.getCHeaderName() == self.originalBaseName

    def getCSourceName(self):
        return self.fileName + ".c"

    def getCHeaderName(self):
        return self.fileName + ".h"

    def getPathToObjectFile(self):
        # Empty string when no object file was found during construction.
        return self.objectPath

    def getExecutableName(self):
        return self.fileName

    def getTestExecutableName(self):
        return "test_" + self.fileName

    def getTestSourceName(self):
        return self.getTestExecutableName() + ".c"

    def getNamespace(self):
        # "parc_Object.c" -> "parc"; None when there is no '_' separator.
        sourceFileName = self.getCSourceName()
        if (sourceFileName.find("_") >= 0):
            return sourceFileName[0:sourceFileName.index("_")]
        else:
            return None

    def getModuleName(self):
        # "parc_Object.c" -> "parc_Object"; None when there is no '.'.
        sourceFileName = self.getCSourceName()
        split = sourceFileName.split("/")
        sourceFileName = split[len(split) - 1]
        if (sourceFileName.find(".") >= 0):
            return sourceFileName[0:sourceFileName.index(".")]
        else:
            return None

    def getModulePrefix(self):
        # "parc_Object.c" -> "parcObject": the module name with '_' removed.
        sourceFileName = self.getCSourceName()
        squashed = sourceFileName.replace("_", "")
        if (squashed.find(".") >= 0):
            return squashed[0:squashed.index(".")]
        else:
            return None

    def getTypeName(self):
        # "parc_Object.c" -> "Object"; None when '.' or '_' is absent.
        sourceFileName = self.getCSourceName()
        if (sourceFileName.find(".") >= 0 and sourceFileName.find("_") >= 0):
            return sourceFileName[(sourceFileName.index("_") + 1):sourceFileName.index(".")]
        else:
            return None

    def getModulePath(self):
        # Directory portion of the original srcPath ('' for a bare name).
        return self.path

    def initialzePath(self, sourceFileName):
        # NOTE: the name is misspelled ("initialze") but kept for
        # compatibility with existing callers.
        parts = sourceFileName.split("/")
        parts = parts[0:len(parts) - 1]
        return '/'.join(map(str, parts))
+
if __name__ == '__main__':
    # Ad-hoc self-test (Python 2 print statements): exercise Module's
    # name-derivation methods and print a diagnostic for any mismatch.
    # First, a name with no namespace separator:
    cFile = Module("file.c.gcov")
    if cFile.getCSourceName() != "file.c":
        print "getCSourceName failed", cFile.getCSourceName()

    if cFile.getCHeaderName() != "file.h":
        print "getCHeaderName failed", cFile.getCHeaderName()

    if cFile.getTestSourceName() != "test_file.c":
        print "getTestSourceName failed", cFile.getTestSourceName()

    if cFile.getTestExecutableName() != "test_file":
        print "getTestExecutableName failed", cFile.getTestExecutableName()

    if cFile.getNamespace() != None:
        print "getNamespace failed", cFile.getNamespace()

    if cFile.getModuleName() != "file":
        print "getModuleName failed", cFile.getModuleName()

    if cFile.getModulePrefix() != "file":
        print "getModulePrefix failed", cFile.getModulePrefix()

    if cFile.getTypeName() != None:
        print "getTypeName failed", cFile.getTypeName()

    # Second, a namespaced module name ("<namespace>_<Type>"):
    cFile = Module("parc_Object.c.gcov")
    if cFile.getCSourceName() != "parc_Object.c":
        print "getCSourceName failed", cFile.getCSourceName()

    if cFile.getCHeaderName() != "parc_Object.h":
        print "getCHeaderName failed", cFile.getCHeaderName()

    if cFile.getTestSourceName() != "test_parc_Object.c":
        print "getTestSourceName failed", cFile.getTestSourceName()

    if cFile.getTestExecutableName() != "test_parc_Object":
        print "getTestExecutableName failed", cFile.getTestExecutableName()

    if cFile.getNamespace() != "parc":
        print "getNamespace failed", cFile.getNamespace()

    if cFile.getModuleName() != "parc_Object":
        print "getModuleName failed", cFile.getModuleName()

    if cFile.getModulePrefix() != "parcObject":
        print "getModulePrefix failed", cFile.getModulePrefix()

    if cFile.getTypeName() != "Object":
        print "getTypeName failed", cFile.getTypeName()
diff --git a/longbow/src/python/site-packages/longbow/LongBow.py b/longbow/src/python/site-packages/longbow/LongBow.py
new file mode 100755
index 00000000..d1f0e77a
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/LongBow.py
@@ -0,0 +1,96 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import sys
+
# ANSI SGR escape sequences used to colorize terminal output.
ansiRed = "\x1b[31m";
ansiGreen = "\x1b[32m";
ansiYellow = "\x1b[33m";
ansiMagenta = "\x1b[35m";
ansiReset = "\x1b[0m";
+
+
def buildRed(string):
    '''Wrap string in red ANSI codes when stdout is a terminal.'''
    if not sys.stdout.isatty():
        return string
    return ansiRed + string + ansiReset
+
+
def buildGreen(string):
    '''Wrap string in green ANSI codes when stdout is a terminal.'''
    if not sys.stdout.isatty():
        return string
    return ansiGreen + string + ansiReset
+
+
def buildYellow(string):
    '''Wrap string in yellow ANSI codes when stdout is a terminal.'''
    if not sys.stdout.isatty():
        return string
    return ansiYellow + string + ansiReset
+
+
def score(distribution, score):
    '''
    Map a numeric score onto a color name given a two-element decreasing
    distribution: above distribution[0] is "green", above distribution[1]
    is "yellow", otherwise "red".
    '''
    if score > distribution[0]:
        return "green"
    if score > distribution[1]:
        return "yellow"
    return "red"
+
+
def scoreBuilder(distribution, score, string):
    '''
    Colorize string according to score.  distribution is a list of two
    decreasing thresholds: above the first the string is green, above the
    second it is yellow, and everything else is red.
    '''
    if score > distribution[0]:
        return buildGreen(string)
    if score > distribution[1]:
        return buildYellow(string)
    return buildRed(string)
+
+
def scorePrinter(distribution, score, string):
    # Print the string colorized per the score distribution
    # (Python 2 print statement).
    print scoreBuilder(distribution, score, string)
+
+
def countLines(fileName):
    '''
    Count the lines in the named file by enumerating them.

    NOTE: an empty file reports 1, not 0 (the index never advances past its
    initial 0) -- preserved deliberately, since callers divide by this value.
    '''
    lastIndex = 0
    with open(fileName) as f:
        for lastIndex, _ in enumerate(f):
            pass
    return lastIndex + 1
+
+
def CFileNameToFunctionPrefix(fileName):
    '''
    Given the name of a C source file or header file, return the canonical
    name prefix for functions within that file.
    For example, the input name "parc_Buffer.c" results in "parcBuffer_".
    Returns None when the file name contains no '_' namespace separator.
    '''
    # Fixed: this module does not import os at the top level; keep the
    # import local so the function no longer raises NameError.
    import os

    fileName = os.path.basename(fileName)
    fileNameSpace = os.path.splitext(fileName)[0]
    parts = fileNameSpace.partition("_")
    # Fixed: str.partition always returns a 3-tuple, so the previous
    # "len(parts) == 3" guard was always true; test the separator itself.
    if parts[1] != "_":
        return None
    return parts[0] + parts[2] + "_"
diff --git a/longbow/src/python/site-packages/longbow/NameReport.py b/longbow/src/python/site-packages/longbow/NameReport.py
new file mode 100755
index 00000000..1d38b4ec
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/NameReport.py
@@ -0,0 +1,818 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import os
+import subprocess
+import argparse
+import csv
+import traceback
+
+import LongBow
+from Language_C import Module
+from FileUtil import *
+from pprint import pprint
+
class NoObjectFileException(Exception):
    '''Raised when a module's compiled object file cannot be located.'''

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
+
+
# Global error string formatting map: maps conformance-failure codes to
# printf-style message templates used by the isValid* checks below.
conformanceErrorFormatMap = {
    "NAMESPACE_MISMATCH" : "Function signature %s does not have the correct namespace prefix (%s)",
    "MODULE_MISMATCH" : "Function signature %s does not have the correct module prefix (%s)",
    "INVALID_STATIC_NAME" : "Local function signature %s must begin with an underscore '_'.",
    "INVALID_GLOBAL_ENUM_NAME" : "Global enumeration prefix %s must match the module namespace %s.",
    "INVALID_STATIC_ENUM_NAME" : "Local enumeration prefix %s must begin with an underscore '_'.",
    "INVALID_GLOBAL_ENUM_VALUE_NAME" : "Global enumeration value prefix %s must match the enum name %s.",
    "INVALID_GLOBAL_TYPEDEF_NAME" : "Global typedef prefix %s must match the module namespace %s.",
    "INVALID_STATIC_TYPEDEF_NAME" : "Local typedef prefix %s must begin with an underscore '_'.",
}
+
def tuplesListToCSV(points):
    '''
    Convert a list of tuples -- data points -- to a list of CSV-formatted strings.
    '''
    lines = []
    for point in points:
        tail = str(point[-1])
        head = [str(field) for field in point[:-1]]
        lines.append(",".join(head + [tail]))
    return lines
+
def tuplesListToPrettyText(points, distribution = [99, 90]):
    '''
    Convert a list of tuples -- data points -- to a list of colorized
    strings, scoring each line by its final element against the given
    distribution.
    '''
    lines = []
    for point in points:
        tail = str(point[-1])
        head = [str(field) for field in point[:-1]]
        text = " ".join(head + [tail])
        lines.append(LongBow.scoreBuilder(distribution, point[-1], text))
    return lines
+
def writeListToStream(llist, fileHandle, appendNewLine = True):
    '''
    Write every entry of llist to the given file stream handle, appending
    a newline after each entry unless appendNewLine is False.
    '''
    for currentLine in llist:
        fileHandle.write(currentLine)
        if appendNewLine:
            fileHandle.write("\n")
+
def isValidModuleName(namespace, typeName, moduleName):
    '''
    Determine if a given module name is valid (conforming): it must be
    exactly "<namespace>_<typeName>".  For example, a module in the `parc`
    namespace with typename `JSON` must be named `parc_JSON`.
    '''
    return moduleName == (namespace + "_" + typeName)
+
def isValidFunctionName(namespace, modulePrefix, functionName, scope):
    '''
    Check a function name against the naming conventions.  Non-local
    functions must start with both the namespace and the module prefix;
    local (static) functions must begin with '_'.
    Returns (isValid, reason) where reason is "" on success.
    '''
    if scope != "Local":
        if not functionName.startswith(namespace):
            return False, conformanceErrorFormatMap["NAMESPACE_MISMATCH"] % ('"' + functionName + '"', namespace)
        if not functionName.startswith(modulePrefix):
            return False, conformanceErrorFormatMap["MODULE_MISMATCH"] % ('"' + functionName + '"', modulePrefix)
        return True, ""
    if not functionName.startswith("_"):
        return False, conformanceErrorFormatMap["INVALID_STATIC_NAME"] % ('"' + functionName + '"')
    return True, ""
+
def isValidTypedefName(typePrefix, name, static):
    '''
    Check a typedef name: a global typedef must start (case-insensitively)
    with the type prefix and begin with an uppercase letter; a static
    typedef must begin with '_'.  Returns (isValid, reason).
    '''
    loweredName = name.lower()
    loweredPrefix = typePrefix.lower()

    if not static:
        if loweredName.startswith(loweredPrefix) and name[0].isupper():
            return True, ""
        if not loweredName.startswith(loweredPrefix):
            return False, conformanceErrorFormatMap["INVALID_GLOBAL_TYPEDEF_NAME"] % ('"' + name + '"', typePrefix)
        # Prefix matches but the name does not begin uppercase.
        return False, conformanceErrorFormatMap["INVALID_STATIC_TYPEDEF_NAME"] % ('"' + name + '"')

    if loweredName.startswith('_'):
        return True, ""
    return False, conformanceErrorFormatMap["INVALID_STATIC_TYPEDEF_NAME"] % ('"' + name + '"')
+
def isValidEnumName(typePrefix, name, values, static):
    '''
    Check an enumeration name and its value names.  A global enum must
    start (case-insensitively) with the type prefix and begin uppercase,
    and every value must be prefixed by the enum name; a static enum must
    begin with '_'.  Returns (isValid, reason).
    '''
    loweredName = name.lower()
    loweredPrefix = typePrefix.lower()

    if not static:
        if not loweredName.startswith(loweredPrefix):
            return False, conformanceErrorFormatMap["INVALID_GLOBAL_ENUM_NAME"] % ('"' + name + '"', typePrefix)
        if not name[0].isupper():
            return False, conformanceErrorFormatMap["INVALID_STATIC_ENUM_NAME"] % ('"' + name + '"')
    elif not loweredName.startswith('_'):
        return False, conformanceErrorFormatMap["INVALID_STATIC_ENUM_NAME"] % ('"' + name + '"')

    for enumVal in values:
        if (not static) and not enumVal.startswith(name):
            return False, conformanceErrorFormatMap["INVALID_GLOBAL_ENUM_VALUE_NAME"] % ('"' + enumVal + '"', name)
    return True, ""
+
def getTypedefs(path, source):
    '''
    Retrieve the names of typedefs (excluding enums) in a given file by parsing the file for
    typedef specifications. This walks over every line in the file searching for each one,
    since we cannot extract enum names nicely using linux tools.
    '''
    typedefs = []
    pathToFile = os.path.join(path, source)
    if os.path.isfile(pathToFile):
        # NOTE(review): the file handle is never closed; consider `with open(...)`.
        fin = open(os.path.join(path, source), 'r')
        lines = fin.readlines()
        i = 0 # LC
        while (i < len(lines)):
            line = lines[i].strip()
            if not line.startswith("//"):
                values = []
                if "typedef" in line and "enum" not in line: # go to the end of the typedef
                    if "{" in line:
                        # Brace form: "typedef struct { ... } Name;" --
                        # scan to the line holding '}'; the name follows it.
                        # NOTE(review): can run past end of `lines`
                        # (IndexError) if '}' never appears -- confirm inputs.
                        while "}" not in line:
                            i = i + 1
                            line = lines[i].strip()
                        name = line.replace("}","").replace(";","").strip()
                        typedefs.append(name)
                    else:
                        # One-line form: "typedef struct Foo Bar;" -- the new
                        # name is the fourth whitespace-separated token.
                        splits = line.split(" ")
                        if "struct" in splits[1]:
                            name = splits[3].replace(";","")
                            typedefs.append(name.strip())
                        else:
                            # Non-struct one-line typedefs are ignored.
                            pass
            i = i + 1
    return typedefs
+
def getEnumerations(path, source):
    '''
    Retrieve the names of enumerations in a given file by parsing the file for enum specifications.
    This walks over every line in the file searching for enums, since we cannot extract enum names
    nicely using linux tools.

    Returns a list of (enumName, [valueLines]) tuples.
    '''
    enums = []
    pathToFile = os.path.join(path, source)
    if os.path.isfile(pathToFile):
        # NOTE(review): the file handle is never closed; consider `with open(...)`.
        fin = open(os.path.join(path, source), 'r')
        lines = fin.readlines()
        i = 0 # LC
        while (i < len(lines)):
            line = lines[i].strip()
            if not line.startswith("//"):
                values = []
                if "typedef enum" in line: # go to the end of the enumeration
                    # Accumulate each enumerator line until the closing '}'.
                    while (i + 1 < len(lines) and line.find("}") < 0):
                        i = i + 1
                        line = lines[i].strip()
                        values.append(line) # append each string value
                    if (line.find("}") >= 0):
                        # The '}' line holds the typedef's name; it was also
                        # appended to values above, so pop it back off.
                        name = line.replace("}","").replace(";","")
                        values.pop(len(values) - 1)
                        enums.append((name.strip(), values))
            i = i + 1
    return enums
+
def getTypedefsFromFiles(fileInfoList):
    '''
    Collect the typedefs from each file in fileInfoList.

    Each element of fileInfoList is a (path, fileName, staticTag) tuple,
    where staticTag flags whether the typedefs in that file should be
    treated as static.  Returns a flat list of (typedefName, staticTag).
    '''
    collected = []
    for (path, fileName, staticTag) in fileInfoList:
        collected.extend((typedef, staticTag) for typedef in getTypedefs(path, fileName))
    return collected
+
def getEnumerationsFromFiles(fileInfoList):
    '''
    Collect the enums from each file in fileInfoList.

    Each element of fileInfoList is a (path, fileName, staticTag) tuple,
    where staticTag flags whether the enums in that file should be treated
    as static.  Returns a flat list of (enumName, values, staticTag).
    '''
    collected = []
    for (path, fileName, staticTag) in fileInfoList:
        collected.extend((enumName, values, staticTag) for (enumName, values) in getEnumerations(path, fileName))
    return collected
+
+class FunctionConformanceContainer():
+ def __init__(self, module):
+ self.path = module.getModulePath()
+ self.module = module
+ self.failedFunctions = []
+ self.passedFunctions = []
+
+ if (len(self.path) > 0):
+ self.fullPath = self.path + os.sep + module.getCSourceName()
+ else:
+ self.fullPath = module.getCSourceName()
+
+ self.computeConformance()
+
+ def computeConformance(self):
+ functionDictionary = {}
+ objectFileName = self.module.getPathToObjectFile()
+ sourceFileName = os.path.join(self.path, self.module.getCSourceName())
+ temp = objectFileName
+ if '.a' in temp: # If this is a path into an archive
+ temp = temp.split('(')[0]
+ if not os.path.isfile(temp):
+ raise NoObjectFileException("You must compile " + str(sourceFileName) + " to generate a corresponding object or provide a special object file path")
+
+ try:
+ functionDictionary = getDarwinTestableFunctions(objectFileName)
+ except:
+ raise Exception("You must compile " + str(sourceFileName) + " to generate a corresponding object or provide a special object file path")
+
+ namespace = self.module.getNamespace()
+ modulePrefix = self.module.getModulePrefix()
+
+ # Find all passing/failing functions
+ if (namespace != None and modulePrefix != None):
+ for scope in functionDictionary:
+ for functionName in functionDictionary[scope]:
+ isValid, reason = isValidFunctionName(namespace, modulePrefix, functionName, scope)
+ if isValid:
+ self.addPassedFunction(functionName)
+ else:
+ self.addFailedFunction(functionName, reason)
+
+ def containsMainFunction(self):
+ for (functionName, reason) in self.failedFunctions:
+ if (functionName.strip() == "main" or functionName.strip().startswith("main")):
+ return True
+ for functionName in self.passedFunctions:
+ if (functionName.strip() == "main" or functionName.strip().startswith("main")):
+ return True
+ return False
+
    @staticmethod
    def getType():
        # Topic label used in reports for this container.
        return "function-names"

    def addFailedFunction(self, function, reason):
        # Record a non-conforming function together with the failure reason.
        self.failedFunctions.append((function, reason))

    def getFailedFunctions(self):
        # List of (functionName, reason) tuples.
        return self.failedFunctions

    def addPassedFunction(self, function):
        # Record a conforming function name.
        self.passedFunctions.append(function)

    def getPassedFunctions(self):
        # List of conforming function names.
        return self.passedFunctions
+
    def analyzeConformance(self):
        '''
        Convert the raw pass/fail function results into a set of individual
        data points (finegrain results) and overall percentage.
        '''
        numPassed = len(self.passedFunctions)
        numFailed = len(self.failedFunctions)

        # Default to a perfect score when there is nothing to grade.
        self.percentage = 100.0
        self.points = []

        if (numPassed + numFailed > 0): # skip invalid entries
            self.percentage = float(float(numPassed) / float(numFailed + numPassed)) * 100.0

        # Data point schema:
        # namespace, moduleName, targetName, topic, line, col, score
        for fname in self.passedFunctions:
            data = ["function-name", fname, 100.0]
            self.points.append(data)

        # Failed points additionally carry the failure reason; score is 0.
        for (fname, reason) in self.failedFunctions:
            data = ["function-name", fname, reason, 0.0]
            self.points.append(data)
+
    def getNumberOfPassed(self):
        # Count of conforming function names.
        return len(self.passedFunctions)

    def getNumberOfFailed(self):
        # Count of non-conforming function names.
        return len(self.failedFunctions)

    def totalCSV(self):
        # Finegrain results as CSV rows, each prefixed with the module path.
        return tuplesListToCSV(map(lambda point : [self.fullPath] + point, self.points))

    def totalText(self, distribution):
        # Finegrain results as colorized text lines, prefixed with the module path.
        formattedLines = tuplesListToPrettyText(self.points, distribution)
        return map(lambda formattedLine : self.fullPath + " " + formattedLine, formattedLines)

    def summaryCSV(self):
        # One-line CSV summary: (topic, overall percentage).
        line = [(self.getType(), self.percentage)]
        return tuplesListToCSV(line)

    def summaryText(self, distribution):
        # One-line text summary: (topic, overall percentage).
        line = [(self.getType(), self.percentage)]
        return tuplesListToPrettyText(line, distribution)

    def getScore(self):
        # (topic, overall percentage) tuple for this container.
        return (self.getType(), self.percentage)
+
+
class EnumConformanceContainer():
    '''
    Stores pass/fail enum-naming conformance results for a single module
    and reports them as finegrain data points and an overall percentage.
    '''
    def __init__(self, module):
        self.path = module.getModulePath()
        self.module = module
        self.failedEnums = []   # list of (enumName, reason)
        self.passedEnums = []   # list of enumName

        # Full path to the module's C source, used as the report row prefix.
        if (len(self.path) > 0):
            self.fullPath = self.path + os.sep + module.getCSourceName()
        else:
            self.fullPath = module.getCSourceName()

        self.computeConformance()

    def computeConformance(self):
        # Collect enums from the source file (flagged static) and the header
        # file, then validate each name against the module prefix.
        sourceFileName = self.module.getCSourceName()
        headerFileName = self.module.getCHeaderName()

        enums = getEnumerationsFromFiles([(self.path, sourceFileName, True), (self.path, headerFileName, False)])
        modulePrefix = self.module.getModulePrefix()
        if (modulePrefix != None):
            for (enumName, values, staticTag) in enums:
                isValid, reason = isValidEnumName(modulePrefix, enumName, values, staticTag)
                if isValid:
                    self.addPassedEnum(enumName)
                else:
                    self.addFailedEnum(enumName, reason)

    @staticmethod
    def getType():
        # Topic label used in reports for this container.
        return "enum-names"

    def addFailedEnum(self, enum, reason):
        # Record a non-conforming enum together with the failure reason.
        self.failedEnums.append((enum, reason))

    def getFailedEnums(self):
        # List of (enumName, reason) tuples.
        return self.failedEnums

    def addPassedEnum(self, enum):
        # Record a conforming enum name.
        self.passedEnums.append(enum)

    def getPassedEnums(self):
        # List of conforming enum names.
        return self.passedEnums

    def analyzeConformance(self):
        '''
        Convert the raw pass/fail enum results into a set of individual
        data points (finegrain results) and overall percentage.
        '''
        self.enumPercentage = 100.0   # perfect score when nothing to grade
        self.points = []
        numPassed = len(self.passedEnums)
        numFailed = len(self.failedEnums)

        if (numPassed + numFailed > 0):
            self.enumPercentage = float((float(numPassed) / float(numPassed + numFailed)) * 100)

        for ename in self.passedEnums:
            data = ["enum-name", ename, 100.0]
            self.points.append(data)

        # Failed points additionally carry the failure reason; score is 0.
        for (ename, reason) in self.failedEnums:
            data = ["enum-name", ename, reason, 0.0]
            self.points.append(data)

    def getNumberOfPassed(self):
        # Count of conforming enum names.
        return len(self.passedEnums)

    def getNumberOfFailed(self):
        # Count of non-conforming enum names.
        return len(self.failedEnums)

    def totalCSV(self):
        # Finegrain results as CSV rows, each prefixed with the module path.
        return tuplesListToCSV(map(lambda point : [self.fullPath] + point, self.points))

    def totalText(self, distribution):
        # Finegrain results as colorized text lines, prefixed with the module path.
        formattedLines = tuplesListToPrettyText(self.points, distribution)
        return map(lambda formattedLine : self.fullPath + " " + formattedLine, formattedLines)

    def summaryCSV(self):
        # One-line CSV summary: (topic, overall percentage).
        line = [(self.getType(), self.enumPercentage)]
        return tuplesListToCSV(line)

    def summaryText(self, distribution):
        # One-line text summary: (topic, overall percentage).
        line = [(self.getType(), self.enumPercentage)]
        return tuplesListToPrettyText(line, distribution)

    def getScore(self):
        # (topic, overall percentage) tuple for this container.
        return (self.getType(), self.enumPercentage)
+
class TypedefConformanceContainer():
    '''
    Stores pass/fail typedef-naming conformance results for a single module
    and reports them as finegrain data points and an overall percentage.
    '''
    def __init__(self, module):
        self.path = module.getModulePath()
        self.module = module
        self.failedTypedefs = []   # list of (typedefName, reason)
        self.passedTypedefs = []   # list of typedefName

        # Full path to the module's C source, used as the report row prefix.
        if (len(self.path) > 0):
            self.fullPath = self.path + os.sep + module.getCSourceName()
        else:
            self.fullPath = module.getCSourceName()

        self.computeConformance()

    def computeConformance(self):
        # Collect typedefs from the source file (flagged static) and the
        # header file, then validate each name against the module prefix.
        sourceFileName = self.module.getCSourceName()
        headerFileName = self.module.getCHeaderName()

        typedefs = getTypedefsFromFiles([(self.path, sourceFileName, True), (self.path, headerFileName, False)])

        modulePrefix = self.module.getModulePrefix()
        if (modulePrefix != None):
            for (typedefName, staticTag) in typedefs:
                isValid, reason = isValidTypedefName(modulePrefix, typedefName, staticTag)
                if isValid:
                    self.addPassedTypedef(typedefName)
                else:
                    self.addFailedTypedef(typedefName, reason)

    @staticmethod
    def getType():
        # Topic label used in reports for this container.
        return "typedef-names"

    def addFailedTypedef(self, typedef, reason):
        # Record a non-conforming typedef together with the failure reason.
        self.failedTypedefs.append((typedef, reason))

    def getFailedTypedefs(self):
        # List of (typedefName, reason) tuples.
        return self.failedTypedefs

    def addPassedTypedef(self, typedef):
        # Record a conforming typedef name.
        self.passedTypedefs.append(typedef)

    def getPassedTypedefs(self):
        # List of conforming typedef names.
        return self.passedTypedefs

    def analyzeConformance(self):
        '''
        Convert the raw pass/fail typedef results into a set of individual
        data points (finegrain results) and overall percentage.
        '''
        self.points = []
        self.typedefPercentage = 100.0   # perfect score when nothing to grade
        numPassed = len(self.passedTypedefs)
        numFailed = len(self.failedTypedefs)
        if (numPassed + numFailed > 0):
            self.typedefPercentage = float(float(numPassed) / float(numFailed + numPassed)) * 100

        for tName in self.passedTypedefs:
            data = ["typedef-name", tName, 100.0]
            self.points.append(data)

        # Failed points additionally carry the failure reason; score is 0.
        for (tName, reason) in self.failedTypedefs:
            data = ["typedef-name", tName, reason, 0.0]
            self.points.append(data)

    def getNumberOfPassed(self):
        # Count of conforming typedef names.
        return len(self.passedTypedefs)

    def getNumberOfFailed(self):
        # Count of non-conforming typedef names.
        return len(self.failedTypedefs)

    def totalCSV(self):
        # Finegrain results as CSV rows, each prefixed with the module path.
        return tuplesListToCSV(map(lambda point : [self.fullPath] + point, self.points))

    def totalText(self, distribution):
        # Finegrain results as colorized text lines, prefixed with the module path.
        formattedLines = tuplesListToPrettyText(self.points, distribution)
        return map(lambda formattedLine : self.fullPath + " " + formattedLine, formattedLines)

    def summaryCSV(self):
        # One-line CSV summary: (topic, overall percentage).
        line = [(self.getType(), self.typedefPercentage)]
        return tuplesListToCSV(line)

    def summaryText(self, distribution):
        # One-line text summary: (topic, overall percentage).
        line = [(self.getType(), self.typedefPercentage)]
        return tuplesListToPrettyText(line, distribution)

    def getScore(self):
        # (topic, overall percentage) tuple for this container.
        return (self.getType(), self.typedefPercentage)
+
class ModuleConformanceContainer():
    '''
    This conformance container stores a collection of individual type naming
    conformance results within a particular module, and uses the information
    contained therein to provide total finegrain and summarized
    results of the conformance results for each type.
    '''

    def __init__(self, module):
        self.conformanceContainers = []   # per-type containers (functions, enums, typedefs, ...)
        self.path = module.getModulePath()
        self.module = module
        self.validName = False   # module-name conformance flag; set by callers
        self.process = True      # False when this module should be skipped in reports

        # Full path to the module's C source, used as the report row prefix.
        if (len(self.path) > 0):
            self.fullPath = self.path + os.sep + module.getCSourceName()
        else:
            self.fullPath = module.getCSourceName()

    def setProcess(self, value):
        # Mark whether this module's results should be included in reports.
        self.process = value

    def processModule(self):
        # True when this module's results should be included.
        return self.process

    def addConformanceContainer(self, complianceContainer):
        # Add one per-type conformance container to this module.
        self.conformanceContainers.append(complianceContainer)

    def analyzeConformance(self):
        # Run analysis on every contained per-type container.
        for container in self.conformanceContainers:
            container.analyzeConformance()

    def getNumberOfPassed(self):
        tuples = []
        for container in self.conformanceContainers:
            tuples.append((container.getType(), container.getNumberOfPassed()))
        return tuples # list of (topic, # of passed)

    def getNumberOfFailed(self):
        tuples = []
        for container in self.conformanceContainers:
            tuples.append((container.getType(), container.getNumberOfFailed()))
        return tuples # list of (topic, # of failed)

    def totalCSV(self):
        # Concatenated finegrain CSV rows from all contained containers.
        csvTuples = []
        for container in self.conformanceContainers:
            csvTuples = csvTuples + container.totalCSV()
        return csvTuples

    def totalText(self, distribution):
        # Concatenated finegrain text lines from all contained containers.
        textTuples = []
        for container in self.conformanceContainers:
            textTuples = textTuples + container.totalText(distribution)
        return textTuples

    def summaryCSV(self):
        # One CSV row: module path followed by each container's summary value.
        singleTuple = [self.fullPath]
        for container in self.conformanceContainers:
            csvGroup = container.summaryCSV()
            singleTuple = singleTuple + [csvGroup[-1]]
        return tuplesListToCSV([tuple(singleTuple)])

    def summaryText(self, distribution, divider=' '):
        # One text line: module path followed by each container's
        # (topic, score) pair, separated by `divider`.
        formattedLine = self.fullPath
        for container in self.conformanceContainers:
            lineGroup = container.summaryText(distribution)[0].split(" ")
            formattedLine = formattedLine + divider + lineGroup[-2] + ' ' + lineGroup[-1]
        return [formattedLine]

    def getScores(self):
        # Dictionary mapping topic -> percentage for this module.
        scores = {}
        for container in self.conformanceContainers:
            scoreKey, scoreVal = container.getScore()
            scores[scoreKey] = scoreVal
        return scores
+
class ModuleSetConformanceContainer():
    '''
    This conformance container stores a collection of individual module naming
    conformance results, and uses the information contained therein to provide
    summaries of conformance results.
    '''

    def __init__(self):
        # List of ModuleConformanceContainer instances.
        self.conformanceList = []

    def addConformanceContainer(self, container):
        # Add one module's conformance results to the set.
        self.conformanceList.append(container)

    def analyzeConformance(self):
        '''
        Aggregate per-module pass/fail counts into per-type percentages,
        stored in self.typeConformancePercentages.
        '''
        passed = {} # passed type-number bucket
        failed = {} # failed type-number bucket

        for container in self.conformanceList:
            passedSet = container.getNumberOfPassed()
            for (conformanceType, number) in passedSet:
                if (conformanceType in passed):
                    passed[conformanceType] = passed[conformanceType] + number
                else:
                    passed[conformanceType] = number
            failedSet = container.getNumberOfFailed()
            for (conformanceType, number) in failedSet:
                if (conformanceType in failed):
                    failed[conformanceType] = failed[conformanceType] + number
                else:
                    failed[conformanceType] = number

        # NOTE(review): assumes every type present in `passed` also appears in
        # `failed` (containers report zero counts for both) -- confirm, else
        # the lookup below can raise KeyError.
        self.typeConformancePercentages = {}
        for conformanceType in passed:
            total = passed[conformanceType] + failed[conformanceType]
            percentage = 100.0
            if (total > 0):
                percentage = (float(passed[conformanceType]) / float(total)) * 100.0
            self.typeConformancePercentages[conformanceType] = percentage

    def summaryCSV(self):
        # Single CSV row of (type, percentage) pairs across all modules.
        collatedTuple = ["average-scores"]
        for conformanceType in self.typeConformancePercentages:
            collatedTuple.append(conformanceType) # append type
            collatedTuple.append(self.typeConformancePercentages[conformanceType]) # append percentage
        return tuplesListToCSV([tuple(collatedTuple)])

    def summaryText(self, distribution):
        # Single text line of (type, percentage) pairs across all modules.
        formattedLine = "average-scores"
        for conformanceType in self.typeConformancePercentages:
            prettyTypeText = tuplesListToPrettyText([(conformanceType, self.typeConformancePercentages[conformanceType])])[0]
            formattedLine = formattedLine + " " + prettyTypeText
        return [formattedLine]
+
def computeModuleNameConformance(module):
    '''
    Compute the module name conformance. There is no container for this
    result since it's a simple boolean.

    Returns False when any of namespace, module name, or type name is
    unavailable; otherwise defers to isValidModuleName().
    '''
    namespace = module.getNamespace()
    moduleName = module.getModuleName()
    typeName = module.getTypeName()

    # Guard clause: without all three pieces of information the name
    # cannot be validated.
    if namespace is None or moduleName is None or typeName is None:
        return False

    return isValidModuleName(namespace, typeName, moduleName)
+
def computeModuleConformance(module):
    '''
    Compute the naming conformance results for an entire module,
    which includes conformance results for all types contained therein.

    Returns a fully analyzed ModuleConformanceContainer.
    '''
    moduleContainer = ModuleConformanceContainer(module)

    # Get the compliance results for functions, memorizing whether or not a main function was seen
    functionContainer = FunctionConformanceContainer(module)
    moduleContainer.addConformanceContainer(functionContainer)
    moduleContainer.setProcess(not functionContainer.containsMainFunction())

    # Now handle enums, typedefs, etc.
    moduleContainer.addConformanceContainer(EnumConformanceContainer(module))
    moduleContainer.addConformanceContainer(TypedefConformanceContainer(module))
    # Bug fix: this previously assigned to a brand-new attribute named
    # "setValidName", leaving the container's validName flag permanently
    # False.  Set the real flag instead.
    moduleContainer.validName = computeModuleNameConformance(module)

    # Now that we have the data, run over it to generate the results
    moduleContainer.analyzeConformance()

    return moduleContainer
+
def getConformanceHeaders():
    # Report column headers: one label per conformance container type.
    return [FunctionConformanceContainer.getType(),
            EnumConformanceContainer.getType(),
            TypedefConformanceContainer.getType()]
+
def gradeAndPrint(targets, objectDirs, problemsOnly=False, printPrefix=""):
    '''
    Grade naming conformance for each target source file and print a
    colorized per-file score table to stdout.

    problemsOnly suppresses rows whose minimum score is a perfect 100.0;
    printPrefix is prepended to every output line.
    '''
    if len(targets) < 1:
        print "No Files To Grade"
        return

    distribution = [99, 90]   # score thresholds used for colorizing
    maxFileNameLength = max(max(map(lambda target: len(target), targets)), len("File Name"))

    moduleConformanceSet = ModuleSetConformanceContainer()
    headers = getConformanceHeaders()
    pformat = '{prefix}{:<{maxFileNameLength}}'
    nformat = pformat
    # One right-aligned 15-character column per conformance topic.
    for header in headers:
        nformat = nformat + '{:>15}'
    print nformat.format('File Name', *headers, prefix=printPrefix, maxFileNameLength=maxFileNameLength)


    for target in targets:
        module = Module(target, objectDirs)
        if module.isTestSourceName():
            continue   # test sources are not graded
        fileNamePrefix = module.getModuleName()
        path = module.getModulePath()
        try:
            moduleConformance = computeModuleConformance(module)
            if not moduleConformance.processModule():
                pass   # module contains a main(); skip it
            else:
                moduleConformanceSet.addConformanceContainer(moduleConformance)
                scores = moduleConformance.getScores()
                # Track the worst topic score; it drives the row's color.
                minScore = 100.0
                for key in scores:
                    score = scores[key]
                    if score < minScore:
                        minScore = score
                    scores[key] = '%3.1f'%score
                if problemsOnly and minScore == 100.0:
                    continue
                printVals=[]
                for hval in headers:
                    score = 'N/A'
                    if hval in scores:
                        score = scores[hval]
                    printVals.append(score)
                line = nformat.format(target, *printVals, prefix=printPrefix, maxFileNameLength=maxFileNameLength)
                LongBow.scorePrinter(distribution, minScore, line)
        except NoObjectFileException as e:
            # Missing object file: report in red but keep grading the rest.
            eformat = pformat + "Could Not Grade: No .o file found for file"
            line = eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
            print LongBow.buildRed(line)
            pass
        except Exception as e:
            # Any other failure: report the message in red and continue.
            eformat = pformat + "Could Not Grade: {msg}"
            line = eformat.format(target, prefix=printPrefix, maxFileNameLength=maxFileNameLength, msg=e)
            print LongBow.buildRed(line)
            pass
    moduleConformanceSet.analyzeConformance()
+
+
def commandLineMain(args, targets, objectDir):
    '''
    Grade naming conformance for each target and write summary, finegrain,
    and/or average reports to stdout according to the parsed args.
    '''
    # NOTE(review): eval() of a command-line string executes arbitrary
    # text; consider ast.literal_eval for the distribution list.
    distribution = eval(args.distribution)
    moduleConformanceSet = ModuleSetConformanceContainer()

    summary = args.summary
    average = args.average
    finegrain = args.finegrain
    if not (summary or average or finegrain):
        summary = True   # default to the summary report when nothing was requested

    objectDirs = [objectDir]
    for i in range(len(targets)):
        module = Module(targets[i], objectDirs)
        prefix = module.getModuleName()
        path = module.getModulePath()

        tb = None   # traceback text, printed only when --trace was given
        try:
            moduleConformance = computeModuleConformance(module)
            if not moduleConformance.processModule():
                print >> sys.stderr, "Skipping module " + str(prefix) + ": contains a `main` function"
            else:
                moduleConformanceSet.addConformanceContainer(moduleConformance)

                if summary:
                    if args.output == "text":
                        writeListToStream(moduleConformance.summaryText(distribution), sys.stdout)
                    else:
                        writeListToStream(moduleConformance.summaryCSV(), sys.stdout)

                if finegrain:
                    if args.output == "text":
                        writeListToStream(moduleConformance.totalText(distribution), sys.stdout)
                    else:
                        writeListToStream(moduleConformance.totalCSV(), sys.stdout)

        except Exception as e:
            tb = traceback.format_exc()
            print >> sys.stderr, "Error: can't analyze conformance of " + os.path.join(path, prefix) + ": " + str(e)
        finally:
            if tb != None and args.trace:
                print tb
            pass

    moduleConformanceSet.analyzeConformance()
    if average:
        if args.output == "text":
            writeListToStream(moduleConformanceSet.summaryText(distribution), sys.stdout)
        else:
            writeListToStream(moduleConformanceSet.summaryCSV(), sys.stdout)
diff --git a/longbow/src/python/site-packages/longbow/StyleReport.py b/longbow/src/python/site-packages/longbow/StyleReport.py
new file mode 100755
index 00000000..7e8d72e0
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/StyleReport.py
@@ -0,0 +1,382 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import os
+import tempfile
+import subprocess
+import difflib
+import csv
+import argparse
+
+import LongBow
+import ANSITerm
+import FileUtil
+import pprint
+
def getExemplar(fileName, command, config):
    """Run the style tool over fileName and return the exemplar-formatted text as a string."""
    with open(fileName) as source:
        return subprocess.check_output([command, "-q", "-c", config], stdin=source)
+
+
def diff(exemplar, fileName):
    '''
    Compare two strings line-by-line, returning the difflib.Differ
    comparison generator.
    '''
    return difflib.Differ().compare(exemplar.splitlines(), fileName.splitlines())
+
+
class Ratchet:
    '''
    Accumulates a count of diff "change events", fed one signal character
    at a time ('-', '+', '?', or other) via toggle().
    '''
    def __init__(self):
        self.currentValue = 0   # accumulated change count
        self.signal = 0         # last signal seen; 0 means neutral state

    def value(self):
        # Current accumulated change count.
        return self.currentValue

    def toggle(self, signal):
        # NOTE(review): when self.signal is '-' or '+', every inner branch
        # below increments currentValue exactly once, so the nested
        # conditionals are redundant as written -- preserved verbatim
        # pending confirmation of the original intent.
        if self.signal == "-":
            if signal == "-":
                self.currentValue = self.currentValue + 1
            elif signal == "?":
                self.currentValue = self.currentValue + 1
                self.signal = 0
            else:
                self.currentValue = self.currentValue + 1
                self.signal = 0
            pass
        elif self.signal == "+":
            if signal == "-":
                self.currentValue = self.currentValue + 1
            elif signal == "?":
                self.currentValue = self.currentValue + 1
                self.signal = 0
            else:
                self.currentValue = self.currentValue + 1
                self.signal = 0
            pass
        else:
            # Neutral: remember the signal without counting it yet.
            self.signal = signal;

        return self.currentValue
+
+
def computeNonCompliantLines(differ):
    '''
    Count non-compliant lines from a difflib.Differ comparison of a file
    against its exemplar-formatted version.

    Returns the Ratchet change count.  The local `lines` tally is computed
    but not returned.
    '''
    lines = 0
    changes = Ratchet()

    for l in differ:
        if l.startswith('-'):
            changes.toggle(l[0])
            lines = lines - 1
        elif l.startswith('+'):
            changes.toggle(l[0])
            lines = lines + 1
        elif l.startswith('?'):
            pass   # intra-line hint rows are ignored
        elif l.startswith(' '):
            lines = lines +1
        else:
            # Differ output should only start with '-', '+', '?' or ' '.
            print "What is this:", l

    return changes.value()
+
+
+def reportWhy(differ):
+ print '\n'.join(diff)
+ return
+
+
class SyntaxCompliance:
    '''
    Measures how closely a source file matches the exemplar formatting
    produced by an external style tool, expressed as a 0-100 score.
    '''
    def __init__(self, fileName, exemplarCommand, exemplarConfig):
        self.fileName = fileName
        self.nonCompliantLines = 0
        self.score = 0
        self.exemplarCommand = exemplarCommand   # style tool executable
        self.exemplarConfig = exemplarConfig     # style tool configuration file
        try:
            self.fileData = FileUtil.readFileString(self.fileName)
            self.totalLines = len(self.fileData.splitlines())
        except IOError, e:
            # Unreadable input is fatal for this checker.
            print >> sys.stderr, e
            sys.exit(1)
        pass

    def check(self):
        '''
        Compute nonCompliantLines by diffing the file against its
        exemplar-formatted version; returns self for chaining.
        '''
        # NOTE(review): arguments are (fileData, exemplarData) although
        # diff()'s parameters are named (exemplar, fileName) -- the order
        # looks swapped relative to the names; confirm intended direction.
        self.exemplarData = getExemplar(self.fileName, self.exemplarCommand, self.exemplarConfig)
        differ = diff(self.fileData, self.exemplarData)

        self.nonCompliantLines = computeNonCompliantLines(differ)

        return self

    def report(self):
        # Dictionary summary of this file's style compliance.
        result = { "fileName" : self.fileName,
                   "label": "style",
                   "score": self.getScore(),
                   "totalLines" : self.getTotalLines(),
                   "nonCompliantLines" : self.getNonCompliantLines()
                   }
        return result

    def getFileName(self):
        return self.fileName

    def getExemplarCommand(self):
        return self.exemplarCommand;

    def getExemplarConfig(self):
        return self.exemplarConfig;

    def getScore(self):
        # Percentage of compliant lines, truncated to an int; 0 for an
        # empty file (ZeroDivisionError swallowed deliberately).
        result = 0
        try:
            result = int(100 * (1.0 - (float(self.getNonCompliantLines()) / float(self.getTotalLines()))))
        except ZeroDivisionError:
            pass
        return result

    def getTotalLines(self):
        return self.totalLines

    def getNonCompliantLines(self):
        return self.nonCompliantLines

    def explain(self):
        '''
        Print a colorized diff of the file against its exemplar: removals
        red, additions green, intra-line hints yellow.
        '''
        self.exemplarData = getExemplar(self.fileName, self.exemplarCommand, self.exemplarConfig)
        differ = diff(self.fileData, self.exemplarData)

        ansiTerm = ANSITerm.ANSITerm()

        for l in differ:
            if l[0] == '-':
                ansiTerm.printColorized("red", l)
            elif l[0] == '+':
                ansiTerm.printColorized("green", l)
            elif l[0] == '?':
                # Drop the trailing newline of '?' hint rows.
                ansiTerm.printColorized("yellow", l[0:len(l)-1])
            else:
                print l
            pass
        return
+
+
def csvScore(distribution, report):
    # Print one colorized CSV row for a single file's style report.
    string = "style,%s,%d,%d,%.2f" % (report["fileName"], report["totalLines"], report["nonCompliantLines"], report["score"])
    LongBow.scorePrinter(distribution, report["score"], string)
    return


def csvAverage(distribution, complianceList):
    # Print the mean score across all checked files.
    scores = map(lambda target: target.getScore(), complianceList)
    sum = reduce(lambda sum, score : sum + score, scores)
    value = float(sum) / float(len(complianceList))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return


def csvTotal(distribution, complianceList):
    # Print the aggregate score weighted by line counts across all files.
    totalLines = reduce(lambda sum, x: sum + x, map(lambda element : element.getTotalLines(), complianceList))
    totalNonCompliantLines = reduce(lambda sum, x: sum + x, map(lambda element : element.getNonCompliantLines(), complianceList))
    value = 100.0 - (100.0 * float(totalNonCompliantLines) / float(totalLines))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return


def csvSummary(distribution, complianceList):
    # Print one CSV row per checked file.
    map(lambda target: csvScore(distribution, target.report()), complianceList)
    return
+
+
def textScore(distribution, report, maxFileNameLength, prefix=""):
    '''
    Print one colorized, column-aligned text row for a single file's
    style report.
    '''
    format = "%s%-*s %6d %6d %6.2f"
    string = format % (prefix, maxFileNameLength, report["fileName"], report["totalLines"], report["nonCompliantLines"], report["score"])
    LongBow.scorePrinter(distribution, report["score"], string)
    return


def textAverage(distribution, complianceList):
    # Print the mean score across all checked files.
    scores = map(lambda target: target.getScore(), complianceList)
    sum = reduce(lambda sum, score : sum + score, scores)
    value = float(sum) / float(len(complianceList))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return


def textTotal(distribution, complianceList):
    # Print the aggregate score weighted by line counts across all files.
    totalLines = reduce(lambda sum, x: sum + x, map(lambda element : element.getTotalLines(), complianceList))
    totalNonCompliantLines = reduce(lambda sum, x: sum + x, map(lambda element : element.getNonCompliantLines(), complianceList))
    value = 100.0 - (100.0 * float(totalNonCompliantLines) / float(totalLines))
    LongBow.scorePrinter(distribution, value, "%.2f" % (value))
    return


def textSummary(distribution, complianceList, prefix=""):
    # Print a header row plus one aligned text row per checked file.
    if len(complianceList) > 0:
        maxFileNameLength = max(max(map(lambda target: len(target.getFileName()), complianceList)), len("File Name"))

        print "%s%-*s %6s %6s %6s" % (prefix, maxFileNameLength, "File Name", "Lines", "Errors", "Score")
        map(lambda target: textScore(distribution, target.report(), maxFileNameLength, prefix), complianceList)

    return


def textVisual(complianceList):
    # Print the colorized diff explanation for every checked file.
    map(lambda target: target.explain(), complianceList)
    return
+
+
def openDiff(sourceFile, exemplarCommand, exemplarConfig):
    '''
    Launch the Mac OS X opendiff (FileMerge) tool comparing sourceFile with
    its exemplar-formatted version, merging changes back into sourceFile.
    Blocks until opendiff exits.
    '''
    exemplar = getExemplar(sourceFile, exemplarCommand, exemplarConfig);
    temporaryExemplarFile = tempfile.NamedTemporaryFile(suffix=".c", delete=False)
    try:
        with open(temporaryExemplarFile.name, "w") as exemplarOutput:
            exemplarOutput.write(exemplar)

        subprocess.check_output(["opendiff", sourceFile, temporaryExemplarFile.name, "-merge", sourceFile])
    finally:
        # Bug fix: the temporary file is created with delete=False and was
        # never removed, leaking one temp file per invocation.
        os.remove(temporaryExemplarFile.name)

    return
+
+
def displaySummary(args, complianceList):
    '''Dispatch the per-file summary report to text, gui (text), or CSV output.'''
    # NOTE(review): eval() of a command-line string executes arbitrary
    # text; consider ast.literal_eval for the distribution list.
    distribution = eval(args.distribution)

    if args.output == "text":
        textSummary(distribution, complianceList)
    elif args.output == "gui":
        textSummary(distribution, complianceList)
    else:
        csvSummary(distribution, complianceList)
    return


def displayAverage(args, complianceList):
    '''Dispatch the average-score report to text, gui (text), or CSV output.'''

    distribution = eval(args.distribution)

    if args.output == "text":
        textAverage(distribution, complianceList)
    elif args.output == "gui":
        textAverage(distribution, complianceList)
    else:
        csvAverage(distribution, complianceList)
    return


def displayTotal(args, complianceList):
    '''Dispatch the aggregate-score report to text, gui (text), or CSV output.'''
    distribution = eval(args.distribution)

    if args.output == "text":
        textTotal(distribution, complianceList)
    elif args.output == "gui":
        textTotal(distribution, complianceList)
    else:
        csvTotal(distribution, complianceList)
    return


def guiVisual(args, complianceList):
    # Open opendiff/FileMerge for every checked file.
    map(lambda target: openDiff(target.getFileName(), target.getExemplarCommand(), target.getExemplarConfig()), complianceList)
    return


def displayVisual(args, complianceList):
    '''Show per-file diffs either as colorized text or via the opendiff GUI.'''
    if args.output == "text":
        textVisual(complianceList)
    elif args.output == "gui":
        guiVisual(args, complianceList)
    else:
        print >> sys.stderr, "Unsupported output format '%s'. Expected 'text' or 'gui'." % (args.output)
        sys.exit(1)
    return
+
+
def sortComplianceList(args, complianceList):
    '''
    Sort the compliance results by the key named in args.key (name, score,
    or size; each with a descending- variant).  "help" lists the keys and
    exits; an unknown key is a fatal error.
    '''

    # Map of sort-key name -> (key function, reverse flag).
    sorter = {
        "name" : { "function" : lambda k: k.getFileName(), "reverse" : False },
        "descending-name" : { "function" : lambda k: k.getFileName(), "reverse" : True },
        "score" : { "function" : lambda k: k.getScore(), "reverse" : False },
        "descending-score" : { "function" : lambda k: k.getScore(), "reverse" : True },
        "size" : { "function" : lambda k: k.getTotalLines(), "reverse" : False },
        "descending-size" : { "function" : lambda k: k.getTotalLines(), "reverse" : True },
        }

    if args.key == "help":
        print >> sys.stderr, "Supported sort keys:"
        map(lambda k: sys.stderr.write("'" + k + "' "), sorted(sorter))
        print
        sys.exit(1)

    if args.key in sorter:
        complianceList = sorted(complianceList, key=sorter[args.key]["function"], reverse=sorter[args.key]["reverse"])
    else:
        print >> sys.stderr, "Unsupported sort key '%s'. Type '--key help'" % (args.key)
        sys.exit(1)

    return complianceList
+
+
def exclude(args, complianceList):
    '''
    Drop entries whose letter grade (per the score distribution) appears in
    the comma-separated args.exclude list.
    '''
    # NOTE(review): eval() of a command-line string executes arbitrary
    # text; consider ast.literal_eval for the distribution list.
    excluded = map(lambda token : token.strip(), args.exclude.split(","))
    complianceList = filter(lambda entry: LongBow.score(eval(args.distribution), entry.getScore()) not in excluded, complianceList)

    return complianceList
+
+
def gradeAndPrint(targets, exemplarCommand, exemplarConfig, problemsOnly=False, prefix=""):
    '''
    Check style compliance for each target and print a summary table;
    files that could not be evaluated are reported in red afterwards.
    '''
    complianceList = []
    problemList = []
    for target in targets:
        try:
            complianceList.append(SyntaxCompliance(target, exemplarCommand, exemplarConfig).check())
        except:
            # NOTE(review): bare except hides the failure reason; the file
            # is merely reported as un-gradable below.
            problemList.append(target)
            pass
    complianceList = sorted(complianceList, key=lambda k: k.getFileName())
    if problemsOnly:
        # Keep only files with at least one non-compliant line.
        complianceList = filter(lambda entry: entry.getScore() < 100, complianceList)
    distribution=[99,90]
    textSummary(distribution, complianceList, prefix)

    for target in problemList:
        print LongBow.buildRed("%s%s could not be evaluated" % (prefix, target))
+
+
def commandLineMain(args, targets, exemplarCommand, exemplarConfig):
    '''
    Check style compliance for every target, then sort, filter and display
    the results according to the parsed command-line args.
    '''
    complianceList = map(lambda target: SyntaxCompliance(target, exemplarCommand, exemplarConfig).check(), targets)

    complianceList = sortComplianceList(args, complianceList)

    complianceList = exclude(args, complianceList)

    # Exactly one display mode is honored, in this priority order.
    if args.summary:
        displaySummary(args, complianceList)
    elif args.average:
        displayAverage(args, complianceList)
    elif args.total:
        displayTotal(args, complianceList)
    elif args.visual:
        displayVisual(args, complianceList)
    return
diff --git a/longbow/src/python/site-packages/longbow/SymbolTable.py b/longbow/src/python/site-packages/longbow/SymbolTable.py
new file mode 100755
index 00000000..20a909d7
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/SymbolTable.py
@@ -0,0 +1,87 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+import os
+import subprocess
+import re
+import sys
+import pprint
+
def parseLocation(location):
    '''
    Split an nm -PAog location field into (libraryName, objectFileName).

    "libfoo.a[bar.o]:" -> ("libfoo.a", "bar.o"); a plain "bar.o:" yields
    (None, "bar.o").  A trailing ":" (and anything after it) is stripped
    from the object file name.
    '''
    token = location.split("[")
    objectFileName = location

    if len(token) > 1:
        libraryName = token[0]
        objectFileName = token[1].split("]")[0]
    else:
        libraryName = None

    objectFileName = objectFileName.split(":")[0]
    return (libraryName, objectFileName)

def parseDarwinOutput(lines, accumulator=None):
    '''
    Fold nm -PAog output lines into a dictionary keyed by the location
    field, each entry holding "defined" (T), "undefined" (U) and
    "globalData" (D) symbol lists.

    Bug fix: the original created a new entry on first sight of a location
    WITHOUT recording that line's symbol, silently dropping the first
    symbol of every object file.  The entry is now created first and the
    symbol recorded unconditionally.  The mutable default argument has
    also been replaced with None so state no longer leaks between calls.
    '''
    if accumulator is None:
        accumulator = {}

    for line in lines:
        token = line.split(" ")
        fullName = token[0]

        libraryName, objectFileName = parseLocation(token[0])
        name = token[1]
        type = token[2]
        if fullName not in accumulator:
            accumulator[fullName] = { "fullName" : fullName, "libraryName" : libraryName, "objectFileName" : objectFileName, "defined" : [], "undefined" : [], "globalData" : [] }
        if type == "U":
            accumulator[fullName]["undefined"].append({ "name" : name })
        elif type == "T":
            accumulator[fullName]["defined"].append({ "name" : name })
        elif type == "D":
            accumulator[fullName]["globalData"].append({ "name" : name })

    return accumulator
+
def getDarwinSymbolTable(objectFileName, accumulator = { }):
    '''
    Run /usr/bin/nm -PAog on objectFileName and fold the parsed symbols
    into accumulator (Darwin-specific).
    '''
    # NOTE(review): the mutable default dict is shared across calls that
    # omit `accumulator`, so earlier results leak into later calls.
    command = [ "/usr/bin/nm", "-PAog", objectFileName ]

    output = subprocess.check_output(command)
    lines = output.splitlines()
    return parseDarwinOutput(lines, accumulator)
+
+
def getSymbolTable(objectFileName, accumulator = { }):
    '''
    Build a symbol table for objectFileName, merged into accumulator.
    Currently always delegates to the Darwin (nm) implementation.

    {
    fullName : { "defined" : list of dictionaries,
                 "undefined": list of dictionaries,
                 "globalData" : list of dictionaries,
                 "libraryName" : string
                 "objectFileName : string
                },
    }
    '''
    # NOTE(review): the mutable default dict is shared across calls that
    # omit `accumulator`, so earlier results leak into later calls.
    return getDarwinSymbolTable(objectFileName, accumulator)
+
+
if __name__ == '__main__':

    table = dict()
    # Bug fix: iterate sys.argv[1:] rather than sys.argv -- the old loop
    # included argv[0] (this script's own path), so nm was also run on the
    # Python script itself.
    for f in sys.argv[1:]:
        table = getSymbolTable(f, table)

    pp = pprint.PrettyPrinter(indent=4, width=132)

    pp.pprint(table)
diff --git a/longbow/src/python/site-packages/longbow/VocabularyReport.py b/longbow/src/python/site-packages/longbow/VocabularyReport.py
new file mode 100755
index 00000000..5609db11
--- /dev/null
+++ b/longbow/src/python/site-packages/longbow/VocabularyReport.py
@@ -0,0 +1,162 @@
+#! /usr/bin/env python
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+
+import sys
+import itertools
+
+import LongBow
+
def computeVocabularyScore(tokenCount):
    '''
    Map a token count to a 0-100 vocabulary score.

    NOTE(review): scoring is a placeholder — every input currently earns a
    perfect score; tokenCount is not yet consulted.
    '''
    perfectScore = 100.0
    return perfectScore
+
+
def csvFunctionResult(file, function):
    '''
    Print one CSV row for a single function and return its token count.

    Row format: vocabulary,<file>,<function>,<start line>,<token count>,<score>
    '''
    # Score the individual function, not the whole file: the row reports
    # function.token_count, and the text counterpart scores the function too.
    score = computeVocabularyScore(function.token_count)
    string = "vocabulary,%s,%s,%d,%d,%.2f" % (file.filename, function.name, function.start_line, function.token_count, score)

    LongBow.scorePrinter([90, 80], score, string)
    return function.token_count
+
+
def csvFileVocabulary(file):
    '''
    Print one CSV summary row for a file.

    Row format: vocabulary,<file>,,,<average token count>,<score>
    '''
    # Score the metric that is actually reported (the average token count);
    # the raw file.token_count total is not what the row prints.
    score = computeVocabularyScore(file.average_token)
    string = "vocabulary,%s,,,%.2f,%.2f" % (file.filename, file.average_token, score)
    LongBow.scorePrinter([90, 80], score, string)
    return
+
+
def csvFunction(fileInformationList):
    '''
    Print one CSV row for every function of every analysed file.
    '''
    for fileInformation in fileInformationList:
        for function in fileInformation:
            csvFunctionResult(fileInformation, function)
    return
+
+
def csvSummary(fileInformationList):
    '''
    Print one CSV summary row per analysed file.
    '''
    for fileInformation in fileInformationList:
        csvFileVocabulary(fileInformation)
    return
+
+
def textFunctionResult(file, function, maxFileNameLength, maxFunctionNameLength):
    '''
    Print one aligned text row for a single function and return its token
    count.  maxFileNameLength / maxFunctionNameLength size the columns.
    '''
    score = computeVocabularyScore(function.token_count)
    format = "%-" + str(maxFileNameLength) + "s %-" + str(maxFunctionNameLength) + "s %3d %3d %6.2f"
    string = format % (file.filename, function.name, function.start_line, function.token_count, score)

    LongBow.scorePrinter([90, 80], score, string)
    # Return the token count: this is a vocabulary report, and the CSV
    # counterpart returns token_count.  The previous cyclomatic_complexity
    # return was a copy/paste leftover; no caller in this file uses it.
    return function.token_count
+
+
def textFileVocabulary(file, maxFileNameLength, printFormat=""):
    '''
    Print one aligned summary row for a file: filename, average token count,
    and score.  A caller-supplied printFormat overrides the default layout.
    '''
    # Score the vocabulary metric that is printed (average_token);
    # average_CCN is the cyclomatic-complexity metric and was a copy/paste
    # leftover from the complexity report.
    score = computeVocabularyScore(file.average_token)
    if printFormat == "":
        printFormat = "%-" + str(maxFileNameLength) + "s %6.2f %6.2f"
    string = printFormat % (file.filename, file.average_token, score)
    LongBow.scorePrinter([90, 80], score, string)
    return
+
+
def computeMaxFileNameLength(fileInformationList):
    '''
    Return the length of the longest filename in the list, or 0 for an
    empty list.
    '''
    longest = 0
    for fileInformation in fileInformationList:
        longest = max(longest, len(fileInformation.filename))
    return longest
+
+
def computeMaxFunctionNameLength(fileInformationList):
    '''
    Return the length of the longest function name across every analysed
    file, or 0 for an empty list.

    The previous body was a verbatim copy of computeMaxFileNameLength and
    measured filenames instead; the intent (matching the inline computation
    in textFunction) is to measure function names.
    '''
    result = 0
    for fileInformation in fileInformationList:
        for function in fileInformation:
            if len(function.name) > result:
                result = len(function.name)
    return result
+
+
def textFunction(fileInformationList):
    '''
    Print an aligned text row for every function in every analysed file.
    Column widths are sized to the longest file name and function name.
    (As in the original, an empty list raises ValueError from max().)
    '''
    fileNameWidth = max([len(info.filename) for info in fileInformationList])
    functionNameWidth = max([max([len(fn.name) for fn in info]) for info in fileInformationList])

    for info in fileInformationList:
        for fn in info:
            textFunctionResult(info, fn, fileNameWidth, functionNameWidth)
    return
+
+
+def textSummary(fileInformationList, prefix=""):
+ if len(fileInformationList) < 1:
+ print "%sNo Files To Grade" % prefix
+ return
+ maxFileNameLength = max(map(lambda fileInformation: len(fileInformation.filename), fileInformationList))
+ printFormat = prefix + "%-" + str(maxFileNameLength) + "s %10s %6s"
+ print printFormat % ("File Path", "Ave Token", "Score")
+ printFormat = prefix + "%-" + str(maxFileNameLength) + "s %10.2f %6.2f"
+ map(lambda file: textFileVocabulary(file, maxFileNameLength, printFormat), fileInformationList)
+ return
+
+
def computeAverage(fileInformationList):
    '''
    Return the mean of average_token over the analysed files as a float.
    Returns 0.0 for an empty list (the previous reduce() raised TypeError).
    '''
    vocabulary = [fileInformation.average_token for fileInformation in fileInformationList]
    if len(vocabulary) == 0:
        return 0.0
    return float(sum(vocabulary)) / float(len(vocabulary))
+
+
def gradeAndPrint(fileList, hfcca, problemsOnly=False, prefix=""):
    '''
    Analyse fileList with hfcca and print a text summary (prefixed lines).
    With problemsOnly, only files scoring below 100 are listed.
    '''
    options, arguments = hfcca.createHfccaCommandLineParser().parse_args(args=["foo"])
    analysis = hfcca.analyze(fileList, options)

    # Materialise the analyzer's iterator into a plain list.
    fileInformationList = [item for item in analysis]
    if problemsOnly:
        fileInformationList = [item for item in fileInformationList
                               if computeVocabularyScore(item.average_CCN) < 100]

    textSummary(fileInformationList, prefix)
+
+def commandLineMain(args, hfcca):
+ targets = []
+
+ if args.stdin:
+ for line in sys.stdin:
+ t = line.strip()
+ if (len(t) > 0):
+ targets.append(t)
+ else:
+ targets = args.files
+
+ if (len(targets) == 0):
+ print >> sys.stderr, "Error: target list cannot be empty"
+
+ # If nothing was specified, print the summary as a default
+ if args.summary == False and args.function == False and args.average == False:
+ args.summary = True
+
+ options, arguments = hfcca.createHfccaCommandLineParser().parse_args(args=["VocabularyReport"])
+ result = hfcca.analyze(targets, options)
+
+ # Convert from that iterator to a simple list...
+ fileInformationList = map(lambda x : x, result)
+
+ if args.function:
+ if args.output == "text":
+ textFunction(fileInformationList)
+ else:
+ csvFunction(fileInformationList)
+
+ if args.summary:
+ if args.output == "text":
+ textSummary(fileInformationList)
+ else:
+ csvSummary(fileInformationList)
+
+ if args.average:
+ print "%.2f" % computeAverage(fileInformationList)