Moved scripts/ to tools/scripts/

Martin Hořeňovský
2019-12-06 11:53:31 +01:00
parent 6eb04667ad
commit 91ee07e08c
22 changed files with 5 additions and 5 deletions

tools/scripts/approvalTests.py Executable file (+215)

@@ -0,0 +1,215 @@
#!/usr/bin/env python
from __future__ import print_function
import io
import os
import sys
import subprocess
import re
import difflib
import scriptCommon
from scriptCommon import catchPath
if os.name == 'nt':
# Enable console colours on windows
os.system('')
rootPath = os.path.join(catchPath, 'tests/SelfTest/Baselines')
langFilenameParser = re.compile(r'(.+\.[ch]pp)')
filelocParser = re.compile(r'''
.*/
(.+\.[ch]pp) # filename
(?::|\() # : is starting separator between filename and line number on Linux, ( on Windows
([0-9]*) # line number
\)? # Windows also has an ending separator, )
''', re.VERBOSE)
lineNumberParser = re.compile(r' line="[0-9]*"')
hexParser = re.compile(r'\b(0[xX][0-9a-fA-F]+)\b')
durationsParser = re.compile(r' time="[0-9]*\.[0-9]*"')
sonarqubeDurationParser = re.compile(r' duration="[0-9]+"')
timestampsParser = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}\:\d{2}\:\d{2}Z')
versionParser = re.compile(r'Catch v[0-9]+\.[0-9]+\.[0-9]+(-develop\.[0-9]+)?')
nullParser = re.compile(r'\b(__null|nullptr)\b')
exeNameParser = re.compile(r'''
\b
(CatchSelfTest|SelfTest) # Expected executable name
(?:.exe)? # Executable name contains .exe on Windows.
\b
''', re.VERBOSE)
# This is a hack until something more reasonable is figured out
specialCaseParser = re.compile(r'file\((\d+)\)')
# errno macro expands into various names depending on platform, so we need to fix them up as well
errnoParser = re.compile(r'''
\(\*__errno_location\ \(\)\)
|
\(\*__error\(\)\)
|
\(\*_errno\(\)\)
''', re.VERBOSE)
sinceEpochParser = re.compile(r'\d+ .+ since epoch')
infParser = re.compile(r'''
\(\(float\)\(1e\+300\ \*\ 1e\+300\)\) # MSVC INFINITY macro
|
\(__builtin_inff\(\)\) # Linux (ubuntu) INFINITY macro
|
\(__builtin_inff\ \(\)\) # Fedora INFINITY macro
|
__builtin_huge_valf\(\) # OSX macro
''', re.VERBOSE)
nanParser = re.compile(r'''
\(\(float\)\(\(\(float\)\(1e\+300\ \*\ 1e\+300\)\)\ \*\ 0\.0F\)\) # MSVC NAN macro
|
\(\(float\)\(INFINITY\ \*\ 0\.0F\)\) # Yet another MSVC NAN macro
|
\(__builtin_nanf\ \(""\)\) # Linux (ubuntu) NAN macro
|
__builtin_nanf\("0x<hex\ digits>"\) # The weird content of the brackets is there because a different parser has already run before this one
''', re.VERBOSE)
if len(sys.argv) == 2:
cmdPath = sys.argv[1]
else:
cmdPath = os.path.join(catchPath, scriptCommon.getBuildExecutable())
overallResult = 0
def diffFiles(fileA, fileB):
with io.open(fileA, 'r', encoding='utf-8', errors='surrogateescape') as file:
aLines = [line.rstrip() for line in file.readlines()]
with io.open(fileB, 'r', encoding='utf-8', errors='surrogateescape') as file:
bLines = [line.rstrip() for line in file.readlines()]
shortenedFilenameA = fileA.rsplit(os.sep, 1)[-1]
shortenedFilenameB = fileB.rsplit(os.sep, 1)[-1]
diff = difflib.unified_diff(aLines, bLines, fromfile=shortenedFilenameA, tofile=shortenedFilenameB, n=0)
return [line for line in diff if line[0] in ('+', '-')]
def normalizeFilepath(line):
if catchPath in line:
# make paths relative to Catch root
line = line.replace(catchPath + os.sep, '')
m = langFilenameParser.match(line)
if m:
filepath = m.group(0)
# go from \ in windows paths to /
filepath = filepath.replace('\\', '/')
# remove start of relative path
filepath = filepath.replace('../', '')
line = line[:m.start()] + filepath + line[m.end():]
return line
def filterLine(line, isCompact):
line = normalizeFilepath(line)
# strip source line numbers
m = filelocParser.match(line)
if m:
# note that this also strips directories, leaving only the filename
filename, lnum = m.groups()
lnum = ":<line number>" if lnum else ""
line = filename + lnum + line[m.end():]
else:
line = lineNumberParser.sub(" ", line)
if isCompact:
line = line.replace(': FAILED', ': failed')
line = line.replace(': PASSED', ': passed')
# strip Catch version number
line = versionParser.sub("<version>", line)
# replace *null* with 0
line = nullParser.sub("0", line)
# strip executable name
line = exeNameParser.sub("<exe-name>", line)
# strip hexadecimal numbers (presumably pointer values)
line = hexParser.sub("0x<hex digits>", line)
# strip durations and timestamps
line = durationsParser.sub(' time="{duration}"', line)
line = sonarqubeDurationParser.sub(' duration="{duration}"', line)
line = timestampsParser.sub('{iso8601-timestamp}', line)
line = specialCaseParser.sub(r'file:\g<1>', line)
line = errnoParser.sub('errno', line)
line = sinceEpochParser.sub('{since-epoch-report}', line)
line = infParser.sub('INFINITY', line)
line = nanParser.sub('NAN', line)
return line
def approve(baseName, args):
global overallResult
args[0:0] = [cmdPath]
if not os.path.exists(cmdPath):
raise Exception("Executable doesn't exist at " + cmdPath)
baselinesPath = os.path.join(rootPath, '{0}.approved.txt'.format(baseName))
rawResultsPath = os.path.join(rootPath, '_{0}.tmp'.format(baseName))
filteredResultsPath = os.path.join(rootPath, '{0}.unapproved.txt'.format(baseName))
f = open(rawResultsPath, 'w')
subprocess.call(args, stdout=f, stderr=f)
f.close()
rawFile = io.open(rawResultsPath, 'r', encoding='utf-8', errors='surrogateescape')
filteredFile = io.open(filteredResultsPath, 'w', encoding='utf-8', errors='surrogateescape')
for line in rawFile:
filteredFile.write(filterLine(line, 'compact' in baseName).rstrip() + "\n")
filteredFile.close()
rawFile.close()
os.remove(rawResultsPath)
print()
print(baseName + ":")
if os.path.exists(baselinesPath):
diffResult = diffFiles(baselinesPath, filteredResultsPath)
if diffResult:
print('\n'.join(diffResult))
print(" \n****************************\n \033[91mResults differed")
if len(diffResult) > overallResult:
overallResult = len(diffResult)
else:
os.remove(filteredResultsPath)
print(" \033[92mResults matched")
print("\033[0m")
else:
print(" first approval")
if overallResult == 0:
overallResult = 1
print("Running approvals against executable:")
print(" " + cmdPath)
# ## Keep default reporters here ##
# Standard console reporter
approve("console.std", ["~[!nonportable]~[!benchmark]~[approvals] *", "--order", "lex", "--rng-seed", "1"])
# console reporter, include passes, warn about No Assertions
approve("console.sw", ["~[!nonportable]~[!benchmark]~[approvals] *", "-s", "-w", "NoAssertions", "--order", "lex", "--rng-seed", "1"])
# console reporter, include passes, warn about No Assertions, limit failures to first 4
approve("console.swa4", ["~[!nonportable]~[!benchmark]~[approvals] *", "-s", "-w", "NoAssertions", "-x", "4", "--order", "lex", "--rng-seed", "1"])
# junit reporter, include passes, warn about No Assertions
approve("junit.sw", ["~[!nonportable]~[!benchmark]~[approvals] *", "-s", "-w", "NoAssertions", "-r", "junit", "--order", "lex", "--rng-seed", "1"])
# xml reporter, include passes, warn about No Assertions
approve("xml.sw", ["~[!nonportable]~[!benchmark]~[approvals] *", "-s", "-w", "NoAssertions", "-r", "xml", "--order", "lex", "--rng-seed", "1"])
# compact reporter, include passes, warn about No Assertions
approve('compact.sw', ['~[!nonportable]~[!benchmark]~[approvals] *', '-s', '-w', 'NoAssertions', '-r', 'compact', '--order', 'lex', "--rng-seed", "1"])
# sonarqube reporter, include passes, warn about No Assertions
approve("sonarqube.sw", ["~[!nonportable]~[!benchmark]~[approvals] *", "-s", "-w", "NoAssertions", "-r", "sonarqube", "--order", "lex", "--rng-seed", "1"])
if overallResult != 0:
print("If these differences are expected, run approve.py to approve new baselines.")
exit(overallResult)
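
As context for the diff above: filterLine rewrites each raw self-test output line into a stable, platform-independent form before diffing against the baselines. A minimal standalone sketch of the filename/line-number scrubbing, using a simplified pattern and a made-up input line:

import re

# Simplified mirror of filelocParser; the input line is illustrative.
fileloc = re.compile(r'.*/(.+\.[ch]pp)(?::|\()([0-9]*)\)?')
line = '/tmp/Catch2/tests/SelfTest/Misc.tests.cpp:42: FAILED'
m = fileloc.match(line)
if m:
    filename, lnum = m.groups()
    print(filename + (':<line number>' if lnum else '') + line[m.end():])
    # -> Misc.tests.cpp:<line number>: FAILED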

tools/scripts/approve.py Executable file (+33)

@@ -0,0 +1,33 @@
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import shutil
import glob
from scriptCommon import catchPath
rootPath = os.path.join( catchPath, 'tests/SelfTest/Baselines' )
if len(sys.argv) > 1:
files = [os.path.join( rootPath, f ) for f in sys.argv[1:]]
else:
files = glob.glob( os.path.join( rootPath, "*.unapproved.txt" ) )
def approveFile( approvedFile, unapprovedFile ):
justFilename = unapprovedFile[len(rootPath)+1:]
if os.path.exists( unapprovedFile ):
if os.path.exists( approvedFile ):
os.remove( approvedFile )
os.rename( unapprovedFile, approvedFile )
print( "approved " + justFilename )
else:
print( "approval file " + justFilename + " does not exist" )
if files:
for unapprovedFile in files:
approveFile( unapprovedFile.replace( "unapproved.txt", "approved.txt" ), unapprovedFile )
else:
print( "no files to approve" )

tools/scripts/benchmarkCompile.py Executable file (+148)

@@ -0,0 +1,148 @@
#!/usr/bin/env python
from __future__ import print_function
import time, subprocess, sys, os, shutil, glob, random
import argparse
def median(lst):
lst = sorted(lst)
mid, odd = divmod(len(lst), 2)
if odd:
return lst[mid]
else:
return (lst[mid - 1] + lst[mid]) / 2.0
def mean(lst):
return float(sum(lst)) / max(len(lst), 1)
compiler_path = ''
flags = []
main_file = r'''
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
'''
main_name = 'catch-main.cpp'
dir_name = 'benchmark-dir'
files = 20
test_cases_in_file = 20
sections_in_file = 4
assertions_per_section = 5
checks = [
'a != b', 'a != c', 'a != d', 'a != e', 'b != c', 'b != d', 'b != e', 'c != d', 'c != e', 'd != e', 'a + a == a',
'a + b == b', 'a + c == c', 'a + d == d', 'a + e == e', 'b + a == b', 'b + b == c', 'b + c == d',
'b + d == e', 'c + a == c', 'c + b == d', 'c + c == e', 'd + a == d', 'd + b == e', 'e + a == e',
'a + a + a == a', 'b + c == a + d', 'c + a + a == a + b + b + a',
'a < b', 'b < c', 'c < d', 'd < e', 'a >= a', 'd >= b',
]
def create_temp_dir():
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
os.mkdir(dir_name)
def copy_catch(path_to_catch):
shutil.copy(path_to_catch, dir_name)
def create_catch_main():
with open(main_name, 'w') as f:
f.write(main_file)
def compile_main():
start_t = time.time()
subprocess.check_call([compiler_path, main_name, '-c'] + flags)
end_t = time.time()
return end_t - start_t
def compile_files():
cpp_files = glob.glob('tests*.cpp')
start_t = time.time()
subprocess.check_call([compiler_path, '-c'] + flags + cpp_files)
end_t = time.time()
return end_t - start_t
def link_files():
obj_files = glob.glob('*.o')
start_t = time.time()
subprocess.check_call([compiler_path] + flags + obj_files)
end_t = time.time()
return end_t - start_t
def benchmark(func):
results = [func() for i in range(10)]
return mean(results), median(results)
def char_range(start, end):
for c in range(ord(start), ord(end)):
yield chr(c)
def generate_sections(fd):
for i in range(sections_in_file):
fd.write(' SECTION("Section {}") {{\n'.format(i))
fd.write('\n'.join(' CHECK({});'.format(check) for check in random.sample(checks, assertions_per_section)))
fd.write(' }\n')
def generate_file(file_no):
with open('tests{}.cpp'.format(file_no), 'w') as f:
f.write('#include "catch.hpp"\n\n')
for i in range(test_cases_in_file):
f.write('TEST_CASE("File {} test {}", "[.compile]"){{\n'.format(file_no, i))
for i, c in enumerate(char_range('a', 'f')):
f.write(' int {} = {};\n'.format(c, i))
generate_sections(f)
f.write('}\n\n')
def generate_files():
create_catch_main()
for i in range(files):
generate_file(i)
options = ['all', 'main', 'files', 'link']
parser = argparse.ArgumentParser(description='Benchmarks Catch\'s compile times against some synthetic tests')
# Add first arg -- benchmark type
parser.add_argument('benchmark_kind', nargs='?', default='all', choices=options, help='What kind of benchmark to run, default: all')
# Args to allow changing header/compiler
parser.add_argument('-I', '--catch-header', default='catch.hpp', help = 'Path to catch.hpp, default: catch.hpp')
parser.add_argument('-c', '--compiler', default='g++', help = 'Compiler to use, default: g++')
parser.add_argument('-f', '--flags', help = 'Flags to be passed to the compiler. Pass as "," separated list')
# Allow creating files only, without running the whole thing
parser.add_argument('-g', '--generate-files', action='store_true', help='Generate test files and quit')
args = parser.parse_args()
compiler_path = args.compiler
catch_path = args.catch_header
if args.generate_files:
create_temp_dir()
copy_catch(catch_path)
os.chdir(dir_name)
# now create the fake test files
generate_files()
# Early exit
print('Finished generating files')
exit(1)
os.chdir(dir_name)
if args.flags:
flags = args.flags.split(',')
print('Time needed for ...')
if args.benchmark_kind in ('all', 'main'):
print(' ... compiling main, mean: {:.2f}, median: {:.2f} s'.format(*benchmark(compile_main)))
if args.benchmark_kind in ('all', 'files'):
print(' ... compiling test files, mean: {:.2f}, median: {:.2f} s'.format(*benchmark(compile_files)))
if args.benchmark_kind in ('all', 'link'):
print(' ... linking everything, mean: {:.2f}, median: {:.2f} s'.format(*benchmark(link_files)))
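
A hedged invocation sketch, following the argparse setup above (compiler, flags, and header path are illustrative):

# Hedged usage sketch:
#   python tools/scripts/benchmarkCompile.py -g -I path/to/catch.hpp          # generate files only
#   python tools/scripts/benchmarkCompile.py files -c clang++ -f -O2,-std=c++14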

tools/scripts/benchmarkRunner.py (+56)

@@ -0,0 +1,56 @@
#!/usr/bin/env python3
import subprocess, os, sys
import xml.etree.ElementTree as ET
from collections import defaultdict
from statistics import median, stdev
from datetime import datetime
def get_commit_hash():
res = subprocess.run('git rev-parse HEAD'.split(), check=True, stdout=subprocess.PIPE, universal_newlines=True)
return res.stdout.strip()
if len(sys.argv) < 2:
print('Usage: {} benchmark-binary'.format(sys.argv[0]))
exit(1)
num_runs = 10
data = defaultdict(list)
def parse_file(file):
def recursive_search(node):
if node.tag == 'TestCase':
results = node.find('OverallResult')
time = results.get('durationInSeconds')
data[node.get('name')].append(float(time))
elif node.tag in ('Group', 'Catch'):
for child in node:
recursive_search(child)
tree = ET.parse(file)
recursive_search(tree.getroot())
def run_benchmarks(binary):
call = [binary] + '-d yes -r xml -o'.split()
for i in range(num_runs):
file = 'temp{}.xml'.format(i)
print('Run number {}'.format(i))
subprocess.run(call + [file])
parse_file(file)
# Remove file right after parsing, because benchmark output can be big
os.remove(file)
# Run benchmarks
run_benchmarks(sys.argv[1])
result_file = '{:%Y-%m-%dT%H-%M-%S}-{}.result'.format(datetime.now(), get_commit_hash())
print('Writing results to {}'.format(result_file))
with open(result_file, 'w') as file:
for k in sorted(data):
file.write('{}: median: {} (s), stddev: {} (s)\n'.format(k, median(data[k]), stdev(data[k])))
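
The aggregation written to the .result file reduces to this (the timings below are made up):

from statistics import median, stdev

# The per-test aggregation used above, on hypothetical durations (seconds).
times = [0.012, 0.011, 0.013, 0.012]
print('median: {} (s), stddev: {} (s)'.format(median(times), stdev(times)))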

tools/scripts/developBuild.py Executable file (+10)

@@ -0,0 +1,10 @@
#!/usr/bin/env python
from __future__ import print_function
import releaseCommon
v = releaseCommon.Version()
v.incrementBuildNumber()
releaseCommon.performUpdates(v)
print( "Updated Version.hpp, README and Conan to v{0}".format( v.getVersionString() ) )

tools/scripts/embed.py Normal file (+63)

@@ -0,0 +1,63 @@
import re
preprocessorRe = re.compile( r'\s*#.*' )
fdefineRe = re.compile( r'\s*#\s*define\s*(\S*)\s*\(' ) # #defines that take arguments
defineRe = re.compile( r'\s*#\s*define\s*(\S*)(\s+)(.*)' ) # all #defines
undefRe = re.compile( r'\s*#\s*undef\s*(\S*)' ) # all #undefs
ifdefCommonRe = re.compile( r'\s*#\s*if' ) # all #ifdefs
ifdefRe = re.compile( r'\s*#\s*ifdef\s*(\S*)' )
ifndefRe = re.compile( r'\s*#\s*ifndef\s*(\S*)' )
endifRe = re.compile( r'\s*#\s*endif\s*//\s*(.*)' )
elseRe = re.compile( r'\s*#\s*else' )
ifRe = re.compile( r'\s*#\s*if\s+(.*)' )
nsRe = re.compile( r'(.*?\s*\s*namespace\s+)(\w+)(\s*{?)(.*)' )
nsCloseRe = re.compile( r'(.*\s*})(\s*\/\/\s*namespace\s+)(\w+)(\s*)(.*)' )
class LineMapper:
def __init__( self, idMap, outerNamespace ):
self.idMap = idMap
self.outerNamespace = outerNamespace
# TBD:
# #if, #ifdef, comments after #else
def mapLine( self, lineNo, line ):
for idFrom, idTo in self.idMap.items():
r = re.compile("(.*)" + idFrom + "(.*)")
m = r.match( line )
if m:
line = m.group(1) + idTo + m.group(2) + "\n"
m = nsCloseRe.match( line )
if m:
originalNs = m.group(3)
# print("[{0}] originalNs: '{1}' - closing".format(lineNo, originalNs))
# print( " " + line )
# print( " 1:[{0}]\n 2:[{1}]\n 3:[{2}]\n 4:[{3}]\n 5:[{4}]".format( m.group(1), m.group(2), m.group(3), m.group(4), m.group(5) ) )
if originalNs in self.outerNamespace:
outerNs, innerNs = self.outerNamespace[originalNs]
return "{0}}}{1}{2}::{3}{4}{5}\n".format( m.group(1), m.group(2), outerNs, innerNs, m.group(4), m.group(5))
m = nsRe.match( line )
if m:
originalNs = m.group(2)
# print("[{0}] originalNs: '{1}'".format(lineNo, originalNs))
# print( " " + line )
# print( " 1:[{0}]\n 2:[{1}]\n 3:[{2}]\n 4:[{3}]".format( m.group(1), m.group(2), m.group(3), m.group(4) ) )
if originalNs in self.outerNamespace:
outerNs, innerNs = self.outerNamespace[originalNs]
return "{0}{1} {{ namespace {2}{3}{4}\n".format( m.group(1), outerNs, innerNs, m.group(3), m.group(4) )
return line
def mapFile(self, filenameIn, filenameOut ):
print( "Embedding:\n {0}\nas:\n {1}".format( filenameIn, filenameOut ) )
with open( filenameIn, 'r' ) as f, open( filenameOut, 'w' ) as outf:
lineNo = 1
for line in f:
outf.write( self.mapLine( lineNo, line ) )
lineNo = lineNo + 1
print( "Written {0} lines".format( lineNo ) )

tools/scripts/embedClara.py Executable file (+27)

@@ -0,0 +1,27 @@
#!/usr/bin/env python
# Execute this script any time you import a new copy of Clara into the third_party area
import os
import sys
import embed
rootPath = os.path.dirname(os.path.realpath( os.path.dirname(sys.argv[0])))
filename = os.path.join( rootPath, "third_party", "clara.hpp" )
outfilename = os.path.join( rootPath, "include", "external", "clara.hpp" )
# Mapping of pre-processor identifiers
idMap = {
"CLARA_HPP_INCLUDED": "CATCH_CLARA_HPP_INCLUDED",
"CLARA_CONFIG_CONSOLE_WIDTH": "CATCH_CLARA_CONFIG_CONSOLE_WIDTH",
"CLARA_TEXTFLOW_HPP_INCLUDED": "CATCH_CLARA_TEXTFLOW_HPP_INCLUDED",
"CLARA_TEXTFLOW_CONFIG_CONSOLE_WIDTH": "CATCH_CLARA_TEXTFLOW_CONFIG_CONSOLE_WIDTH",
"CLARA_PLATFORM_WINDOWS": "CATCH_PLATFORM_WINDOWS"
}
# outer namespace to add
outerNamespace = { "clara": ("Catch", "clara") }
mapper = embed.LineMapper( idMap, outerNamespace )
mapper.mapFile( filename, outfilename )
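
Combined with embed.py above, the mapping amounts to the following rewrites (derived from LineMapper's regexes; the input lines are illustrative):

#   'CLARA_HPP_INCLUDED'     -> 'CATCH_CLARA_HPP_INCLUDED'
#   'namespace clara {'      -> 'namespace Catch { namespace clara {'
#   '} // namespace clara'   -> '}} // namespace Catch::clara'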

tools/scripts/extractFeaturesFromReleaseNotes.py (+94)

@@ -0,0 +1,94 @@
#!/usr/bin/env python
#
# extractFeaturesFromReleaseNotes.py
#
# Read the release notes - docs/release-notes.md - and generate text
# for pasting in to individual documentation pages, to indicate which
# versions recent features were released in.
#
# Using the output of the file is easier than manually constructing
# the text to paste in to documentation pages.
#
# One way to use this:
# - run this script, saving the output to some temporary file
# - diff this output with the actual release notes page
# - the differences are Markdown text that can be pasted in to the
# appropriate documentation pages in the docs/ directory.
# - each release also has a GitHub link showing which documentation files
#   were changed in it. This can be helpful for deciding which
#   documentation pages need the 'Introduced in Catch ...' snippets added.
#
from __future__ import print_function
import re
def create_introduced_in_text(version, bug_number = None):
"""Generate text to paste in to documentation file"""
if bug_number:
return '> [Introduced](https://github.com/catchorg/Catch2/issues/%s) in Catch %s.' % (bug_number, version)
else:
# Use this text for changes that don't have issue numbers
return '> Introduced in Catch %s.' % version
def link_to_changes_in_release(release, releases):
"""
Markdown text for a hyperlink showing all edits in a release, or empty string
:param release: A release version, as a string
:param releases: A container of releases, in descending order - newest to oldest
:return: Markdown text for a hyperlink showing the differences between the give release and the prior one,
or empty string, if the previous release is not known
"""
if release == releases[-1]:
# This is the earliest release we know about
return ''
index = releases.index(release)
previous_release = releases[index + 1]
return '\n[Changes in %s](https://github.com/catchorg/Catch2/compare/v%s...v%s)' % (release, previous_release, release)
def write_recent_release_notes_with_introduced_text():
current_version = None
release_toc_regex = r'\[(\d.\d.\d)\]\(#\d+\)<br>'
issue_number_regex = r'#[0-9]+'
releases = []
with open('../docs/release-notes.md') as release_notes:
for line in release_notes:
line = line[:-1]
print(line)
# Extract version number from table of contents
match = re.search(release_toc_regex, line)
if match:
release_name = match.group(1)
releases.append(release_name)
if line.startswith('## '):
# It's a section with version number
current_version = line.replace('## ', '')
# We decided not to add released-date info for older versions
if current_version == 'Older versions':
break
print(create_introduced_in_text(current_version))
print(link_to_changes_in_release(current_version, releases))
# Not yet found a version number, so to avoid picking up hyperlinks to
# version numbers in the index, keep going
if not current_version:
continue
for bug_link in re.findall(issue_number_regex, line):
bug_number = bug_link.replace('#', '')
print(create_introduced_in_text(current_version, bug_number))
if __name__ == '__main__':
write_recent_release_notes_with_introduced_text()
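
A standalone sketch of the snippet format this script prints (the version and issue number are illustrative):

# Mirrors create_introduced_in_text above, shown with made-up values.
def introduced_in(version, issue=None):
    if issue:
        link = 'https://github.com/catchorg/Catch2/issues/{}'.format(issue)
        return '> [Introduced]({}) in Catch {}.'.format(link, version)
    return '> Introduced in Catch {}.'.format(version)

print(introduced_in('2.11.0'))          # change without an issue number
print(introduced_in('2.11.0', '1776'))  # hypothetical issue number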

tools/scripts/fixWhitespace.py Executable file (+52)

@@ -0,0 +1,52 @@
#!/usr/bin/env python
from __future__ import print_function
import os
from scriptCommon import catchPath
def isSourceFile( path ):
return path.endswith( ".cpp" ) or path.endswith( ".h" ) or path.endswith( ".hpp" )
def fixAllFilesInDir( dir ):
changedFiles = 0
for f in os.listdir( dir ):
path = os.path.join( dir,f )
if os.path.isfile( path ):
if isSourceFile( path ):
if fixFile( path ):
changedFiles += 1
else:
changedFiles += fixAllFilesInDir( path )
return changedFiles
def fixFile( path ):
f = open( path, 'r' )
lines = []
changed = 0
for line in f:
trimmed = line.rstrip() + "\n"
trimmed = trimmed.replace('\t', '    ')
if trimmed != line:
changed = changed +1
lines.append( trimmed )
f.close()
if changed > 0:
print( path + ":" )
print( " - fixed " + str(changed) + " line(s)" )
altPath = path + ".backup"
os.rename( path, altPath )
f2 = open( path, 'w' )
for line in lines:
f2.write( line )
f2.close()
os.remove( altPath )
return True
return False
changedFiles = fixAllFilesInDir(catchPath)
if changedFiles > 0:
print( "Fixed " + str(changedFiles) + " file(s)" )
else:
print( "No trailing whitespace found" )

tools/scripts/generateSingleHeader.py (+129)

@@ -0,0 +1,129 @@
#!/usr/bin/env python
from __future__ import print_function
import os
import io
import sys
import re
import datetime
from glob import glob
from scriptCommon import catchPath
def generate(v):
includesParser = re.compile( r'\s*#\s*include\s*"(.*)"' )
guardParser = re.compile( r'\s*#.*(TWOBLUECUBES_)?CATCH_.*_INCLUDED')
defineParser = re.compile( r'\s*#define\s+(TWOBLUECUBES_)?CATCH_.*_INCLUDED')
ifParser = re.compile( r'\s*#ifndef (TWOBLUECUBES_)?CATCH_.*_INCLUDED')
endIfParser = re.compile( r'\s*#endif // (TWOBLUECUBES_)?CATCH_.*_INCLUDED')
ifImplParser = re.compile( r'\s*#ifdef CATCH_CONFIG_RUNNER' )
commentParser1 = re.compile( r'^\s*/\*')
commentParser2 = re.compile( r'^ \*')
blankParser = re.compile( r'^\s*$')
seenHeaders = set([])
rootPath = os.path.join( catchPath, 'include/' )
outputPath = os.path.join( catchPath, 'single_include/catch2/catch.hpp' )
globals = {
'includeImpl' : True,
'ifdefs' : 0,
'implIfDefs' : -1
}
for arg in sys.argv[1:]:
arg = arg.lower()
if arg == "noimpl":
globals['includeImpl'] = False
print( "Not including impl code" )
else:
print( "\n** Unrecognised argument: " + arg + " **\n" )
exit(1)
# ensure that the output directory exists (hopefully no races)
outDir = os.path.dirname(outputPath)
if not os.path.exists(outDir):
os.makedirs(outDir)
out = io.open( outputPath, 'w', newline='\n', encoding='utf-8')
def write( line ):
if globals['includeImpl'] or globals['implIfDefs'] == -1:
out.write( line )
def insertCpps():
dirs = [os.path.join( rootPath, s) for s in ['', 'internal', 'reporters', 'internal/benchmark', 'internal/benchmark/detail']]
cppFiles = []
for dir in dirs:
cppFiles += glob(os.path.join(dir, '*.cpp'))
# To minimize random diffs, sort the files before processing them
for fname in sorted(cppFiles):
dir, name = fname.rsplit(os.path.sep, 1)
dir += os.path.sep
parseFile(dir, name)
def parseFile( path, filename ):
f = io.open( os.path.join(path, filename), 'r', encoding='utf-8' )
blanks = 0
write( u"// start {0}\n".format( filename ) )
for line in f:
if '// ~*~* CATCH_CPP_STITCH_PLACE *~*~' in line:
insertCpps()
continue
elif ifParser.match( line ):
globals['ifdefs'] += 1
elif endIfParser.match( line ):
globals['ifdefs'] -= 1
if globals['ifdefs'] == globals['implIfDefs']:
globals['implIfDefs'] = -1
m = includesParser.match( line )
if m:
header = m.group(1)
headerPath, sep, headerFile = header.rpartition( "/" )
if headerFile not in seenHeaders:
if headerFile != "tbc_text_format.h" and headerFile != "clara.h":
seenHeaders.add( headerFile )
if headerPath == "internal" and path.endswith("internal/"):
headerPath = ""
sep = ""
if os.path.exists( path + headerPath + sep + headerFile ):
parseFile( path + headerPath + sep, headerFile )
else:
parseFile( rootPath + headerPath + sep, headerFile )
else:
if ifImplParser.match(line):
globals['implIfDefs'] = globals['ifdefs']
if (not guardParser.match( line ) or defineParser.match( line ) ) and not commentParser1.match( line ) and not commentParser2.match( line ):
if blankParser.match( line ):
blanks = blanks + 1
else:
blanks = 0
if blanks < 2 and not defineParser.match(line):
write( line.rstrip() + "\n" )
write( u'// end {}\n'.format(filename) )
write( u"/*\n" )
write( u" * Catch v{0}\n".format( v.getVersionString() ) )
write( u" * Generated: {0}\n".format( datetime.datetime.now() ) )
write( u" * ----------------------------------------------------------\n" )
write( u" * This file has been merged from multiple headers. Please don't edit it directly\n" )
write( u" * Copyright (c) {} Two Blue Cubes Ltd. All rights reserved.\n".format( datetime.date.today().year ) )
write( u" *\n" )
write( u" * Distributed under the Boost Software License, Version 1.0. (See accompanying\n" )
write( u" * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n" )
write( u" */\n" )
write( u"#ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED\n" )
write( u"#define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED\n" )
parseFile( rootPath, 'catch.hpp' )
write( u"#endif // TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED\n\n" )
out.close()
print ("Generated single include for Catch v{0}\n".format( v.getVersionString() ) )
if __name__ == '__main__':
from releaseCommon import Version
generate(Version())
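
The stitching above is driven by includesParser: each quoted #include is expanded in place, recursively and at most once per header. A minimal check of the pattern (the include path is illustrative):

import re

includesParser = re.compile(r'\s*#\s*include\s*"(.*)"')
m = includesParser.match('#include "internal/catch_common.h"')
print(m.group(1))  # -> internal/catch_common.h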

tools/scripts/majorRelease.py Executable file (+10)

@@ -0,0 +1,10 @@
#!/usr/bin/env python
from __future__ import print_function
import releaseCommon
v = releaseCommon.Version()
v.incrementMajorVersion()
releaseCommon.performUpdates(v)
print( "Updated Version.hpp, README and Conan to v{0}".format( v.getVersionString() ) )

tools/scripts/minorRelease.py Executable file (+10)

@@ -0,0 +1,10 @@
#!/usr/bin/env python
from __future__ import print_function
import releaseCommon
v = releaseCommon.Version()
v.incrementMinorVersion()
releaseCommon.performUpdates(v)
print( "Updated Version.hpp, README and Conan to v{0}".format( v.getVersionString() ) )

tools/scripts/patchRelease.py Executable file (+10)

@@ -0,0 +1,10 @@
#!/usr/bin/env python
from __future__ import print_function
import releaseCommon
v = releaseCommon.Version()
v.incrementPatchNumber()
releaseCommon.performUpdates(v)
print( "Updated Version.hpp, README and Conan to v{0}".format( v.getVersionString() ) )

tools/scripts/releaseCommon.py (+166)

@@ -0,0 +1,166 @@
from __future__ import print_function
import os
import sys
import re
import string
import glob
import fnmatch
from scriptCommon import catchPath
versionParser = re.compile( r'(\s*static\sVersion\sversion)\s*\(\s*(.*)\s*,\s*(.*)\s*,\s*(.*)\s*,\s*\"(.*)\"\s*,\s*(.*)\s*\).*' )
rootPath = os.path.join( catchPath, 'include/' )
versionPath = os.path.join( rootPath, "internal/catch_version.cpp" )
definePath = os.path.join(rootPath, 'catch.hpp')
readmePath = os.path.join( catchPath, "README.md" )
cmakePath = os.path.join(catchPath, 'CMakeLists.txt')
class Version:
def __init__(self):
f = open( versionPath, 'r' )
for line in f:
m = versionParser.match( line )
if m:
self.variableDecl = m.group(1)
self.majorVersion = int(m.group(2))
self.minorVersion = int(m.group(3))
self.patchNumber = int(m.group(4))
self.branchName = m.group(5)
self.buildNumber = int(m.group(6))
f.close()
def nonDevelopRelease(self):
if self.branchName != "":
self.branchName = ""
self.buildNumber = 0
def developBuild(self):
if self.branchName == "":
self.branchName = "develop"
self.buildNumber = 0
def incrementBuildNumber(self):
self.developBuild()
self.buildNumber = self.buildNumber+1
def incrementPatchNumber(self):
self.nonDevelopRelease()
self.patchNumber = self.patchNumber+1
def incrementMinorVersion(self):
self.nonDevelopRelease()
self.patchNumber = 0
self.minorVersion = self.minorVersion+1
def incrementMajorVersion(self):
self.nonDevelopRelease()
self.patchNumber = 0
self.minorVersion = 0
self.majorVersion = self.majorVersion+1
def getVersionString(self):
versionString = '{0}.{1}.{2}'.format( self.majorVersion, self.minorVersion, self.patchNumber )
if self.branchName != "":
versionString = versionString + '-{0}.{1}'.format( self.branchName, self.buildNumber )
return versionString
def updateVersionFile(self):
f = open( versionPath, 'r' )
lines = []
for line in f:
m = versionParser.match( line )
if m:
lines.append( '{0}( {1}, {2}, {3}, "{4}", {5} );'.format( self.variableDecl, self.majorVersion, self.minorVersion, self.patchNumber, self.branchName, self.buildNumber ) )
else:
lines.append( line.rstrip() )
f.close()
f = open( versionPath, 'w' )
for line in lines:
f.write( line + "\n" )
def updateReadmeFile(version):
import updateWandbox
downloadParser = re.compile( r'<a href=\"https://github.com/catchorg/Catch2/releases/download/v\d+\.\d+\.\d+/catch.hpp\">' )
success, wandboxLink = updateWandbox.uploadFiles()
if not success:
print('Error when uploading to wandbox: {}'.format(wandboxLink))
exit(1)
f = open( readmePath, 'r' )
lines = []
for line in f:
lines.append( line.rstrip() )
f.close()
f = open( readmePath, 'w' )
for line in lines:
line = downloadParser.sub( r'<a href="https://github.com/catchorg/Catch2/releases/download/v{0}/catch.hpp">'.format(version.getVersionString()) , line)
if '[![Try online](https://img.shields.io/badge/try-online-blue.svg)]' in line:
line = '[![Try online](https://img.shields.io/badge/try-online-blue.svg)]({0})'.format(wandboxLink)
f.write( line + "\n" )
def updateCmakeFile(version):
with open(cmakePath, 'rb') as file:
lines = file.readlines()
replacementRegex = re.compile(b'project\\(Catch2 LANGUAGES CXX VERSION \\d+\\.\\d+\\.\\d+\\)')
replacement = 'project(Catch2 LANGUAGES CXX VERSION {0})'.format(version.getVersionString()).encode('ascii')
with open(cmakePath, 'wb') as file:
for line in lines:
file.write(replacementRegex.sub(replacement, line))
def updateVersionDefine(version):
# First member of the tuple is the compiled regex object, the second is replacement if it matches
replacementRegexes = [(re.compile(b'#define CATCH_VERSION_MAJOR \\d+'),'#define CATCH_VERSION_MAJOR {}'.format(version.majorVersion).encode('ascii')),
(re.compile(b'#define CATCH_VERSION_MINOR \\d+'),'#define CATCH_VERSION_MINOR {}'.format(version.minorVersion).encode('ascii')),
(re.compile(b'#define CATCH_VERSION_PATCH \\d+'),'#define CATCH_VERSION_PATCH {}'.format(version.patchNumber).encode('ascii')),
]
with open(definePath, 'rb') as file:
lines = file.readlines()
with open(definePath, 'wb') as file:
for line in lines:
for replacement in replacementRegexes:
line = replacement[0].sub(replacement[1], line)
file.write(line)
def updateVersionPlaceholder(filename, version):
with open(filename, 'rb') as file:
lines = file.readlines()
placeholderRegex = re.compile(b' in Catch X.Y.Z')
replacement = ' in Catch {}.{}.{}'.format(version.majorVersion, version.minorVersion, version.patchNumber).encode('ascii')
with open(filename, 'wb') as file:
for line in lines:
file.write(placeholderRegex.sub(replacement, line))
def updateDocumentationVersionPlaceholders(version):
print('Updating version placeholder in documentation')
docsPath = os.path.join(catchPath, 'docs/')
for basePath, _, files in os.walk(docsPath):
for file in files:
if fnmatch.fnmatch(file, "*.md") and "contributing.md" != file:
updateVersionPlaceholder(os.path.join(basePath, file), version)
def performUpdates(version):
# First update version file, so we can regenerate single header and
# have it ready for upload to wandbox, when updating readme
version.updateVersionFile()
updateVersionDefine(version)
import generateSingleHeader
generateSingleHeader.generate(version)
# Then copy the reporters to single include folder to keep them in sync
# We probably should have some kind of convention to select which reporters need to be copied automagically,
# but this works for now
import shutil
for rep in ('automake', 'tap', 'teamcity', 'sonarqube'):
sourceFile = os.path.join(catchPath, 'include/reporters/catch_reporter_{}.hpp'.format(rep))
destFile = os.path.join(catchPath, 'single_include', 'catch2', 'catch_reporter_{}.hpp'.format(rep))
shutil.copyfile(sourceFile, destFile)
updateReadmeFile(version)
updateCmakeFile(version)
updateDocumentationVersionPlaceholders(version)
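
A standalone sketch of the formatting rules in Version.getVersionString above (the numbers are illustrative):

def version_string(major, minor, patch, branch='', build=0):
    s = '{0}.{1}.{2}'.format(major, minor, patch)
    if branch != '':
        s += '-{0}.{1}'.format(branch, build)
    return s

print(version_string(2, 11, 1))                # release:       2.11.1
print(version_string(2, 11, 0, 'develop', 3))  # develop build: 2.11.0-develop.3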

tools/scripts/releaseNotes.py Executable file (+65)

@@ -0,0 +1,65 @@
#!/usr/bin/env python
from __future__ import print_function
import os
import re
import urllib2
import json
from scriptCommon import catchPath
from scriptCommon import runAndCapture
issueNumberRe = re.compile( r'(.*?)#([0-9]*)([^0-9]?.*)' )
rootPath = os.path.join( catchPath, 'include/' )
versionPath = os.path.join( rootPath, "internal/catch_version.hpp" )
hashes = runAndCapture( ['git', 'log', '-2', '--format="%H"', versionPath] )
lines = runAndCapture( ['git', 'log', hashes[1] + ".." + hashes[0], catchPath] )
prevLine = ""
messages = []
dates = []
issues = {}
def getIssueTitle( issueNumber ):
try:
s = urllib2.urlopen("https://api.github.com/repos/philsquared/catch/issues/" + issueNumber ).read()
except:
return "#HTTP Error#"
try:
j = json.loads( s )
return j["title"]
except:
return "#JSON Error#"
for line in lines:
if line.startswith( "commit"):
pass
elif line.startswith( "Author:"):
pass
elif line.startswith( "Date:"):
dates.append( line[5:].lstrip() )
elif line == "" and prevLine == "":
pass
else:
prevLine = line
match = issueNumberRe.match( line )
line2 = ""
while match:
issueNumber = match.group(2)
issue = '#{0} ("{1}")'.format( issueNumber, getIssueTitle( issueNumber ) )
line2 = line2 + match.group(1) + issue
match = issueNumberRe.match( match.group(3) )
if line2 == "":
messages.append( line )
else:
messages.append( line2 )
print("All changes between {0} and {1}:\n".format( dates[-1], dates[0] ))
for line in messages:
print(line)
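
issueNumberRe splits a commit-message line around each #NNNN reference so the issue title can be spliced in; a quick standalone check with a made-up message:

import re

issueNumberRe = re.compile(r'(.*?)#([0-9]*)([^0-9]?.*)')
m = issueNumberRe.match('Fixed #1234, see also #987')
print(m.groups())  # -> ('Fixed ', '1234', ', see also #987')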

tools/scripts/scriptCommon.py (+31)

@@ -0,0 +1,31 @@
import os
import sys
import subprocess
catchPath = os.path.dirname(os.path.dirname(os.path.realpath( os.path.dirname(sys.argv[0]))))
def getBuildExecutable():
if os.name == 'nt':
dir = os.environ.get('CATCH_DEV_OUT_DIR', "cmake-build-debug/projects/SelfTest.exe")
return dir
else:
dir = os.environ.get('CATCH_DEV_OUT_DIR', "cmake-build-debug/projects/SelfTest")
return dir
def runAndCapture( args ):
child = subprocess.Popen(" ".join( args ), shell=True, stdout=subprocess.PIPE)
lines = []
line = ""
while True:
out = child.stdout.read(1)
if out == '' and child.poll() is not None:
break
if out != '':
if out == '\n':
lines.append( line )
line = ""
else:
line = line + out
return lines
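
A hedged usage sketch of runAndCapture; note the comparisons against '' and '\n' assume Python 2, where stdout.read(1) returns str rather than bytes:

from scriptCommon import runAndCapture

# Hypothetical call: capture git output one line per list entry.
for line in runAndCapture(['git', 'log', '-1', '--format=%H']):
    print(line)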

tools/scripts/updateDocumentToC.py (+449)

@@ -0,0 +1,449 @@
#!/usr/bin/env python
#
# updateDocumentToC.py
#
# Insert table of contents at top of Catch markdown documents.
#
# This script is distributed under the GNU General Public License v3.0
#
# It is based on markdown-toclify version 1.7.1 by Sebastian Raschka,
# https://github.com/rasbt/markdown-toclify
#
from __future__ import print_function
import argparse
import glob
import os
import re
import sys
from scriptCommon import catchPath
# Configuration:
minTocEntries = 4
headingExcludeDefault = [1,3,4,5] # use level 2 headers by default
headingExcludeRelease = [1,3,4,5] # use level 2 headers for release-notes.md too
documentsDefault = os.path.join(os.path.relpath(catchPath), 'docs/*.md')
releaseNotesName = 'release-notes.md'
contentTitle = '**Contents**'
contentLineNo = 4
contentLineNdx = contentLineNo - 1
# End configuration
VALIDS = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-&'
def readLines(in_file):
"""Returns a list of lines from a input markdown file."""
with open(in_file, 'r') as inf:
in_contents = inf.read().split('\n')
return in_contents
def removeLines(lines, remove=('[[back to top]', '<a class="mk-toclify"')):
"""Removes existing [back to top] links and <a id> tags."""
if not remove:
return lines[:]
out = []
for l in lines:
if l.startswith(remove):
continue
out.append(l)
return out
def removeToC(lines):
"""Removes existing table of contents starting at index contentLineNdx."""
if not lines[contentLineNdx ].startswith(contentTitle):
return lines[:]
result_top = lines[:contentLineNdx]
pos = contentLineNdx + 1
while lines[pos].startswith('['):
pos = pos + 1
result_bottom = lines[pos + 1:]
return result_top + result_bottom
def dashifyHeadline(line):
"""
Takes a header line from a Markdown document and
returns a tuple of the
'#'-stripped version of the head line,
a string version for <a id=''></a> anchor tags,
and the level of the headline as integer.
E.g.,
>>> dashifyHeadline('### some header lvl3')
('Some header lvl3', 'some-header-lvl3', 3)
"""
stripped_right = line.rstrip('#')
stripped_both = stripped_right.lstrip('#')
level = len(stripped_right) - len(stripped_both)
stripped_wspace = stripped_both.strip()
# GitHub's sluggification works in an interesting way
# 1) '+', '/', '(', ')' and so on are just removed
# 2) spaces are converted into '-' directly
# 3) multiple -- are not collapsed
dashified = ''
for c in stripped_wspace:
if c in VALIDS:
dashified += c.lower()
elif c.isspace():
dashified += '-'
else:
# Unknown symbols are just removed
continue
return [stripped_wspace, dashified, level]
def tagAndCollect(lines, id_tag=True, back_links=False, exclude_h=None):
"""
Gets headlines from the markdown document and creates anchor tags.
Keyword arguments:
lines: a list of sublists where every sublist
represents a line from a Markdown document.
id_tag: if true, inserts <a id> anchor tags (not required by GitHub)
back_links: if true, adds "back to top" links below each headline
exclude_h: header levels to exclude. E.g., [2, 3]
excludes level 2 and 3 headings.
Returns a tuple of 2 lists:
1st list:
A modified version of the input list where
<a id="some-header"></a> anchor tags were inserted
above the header lines (if github is False).
2nd list:
A list of 3-value sublists, where the first value
represents the heading, the second value the string
that was inserted assigned to the IDs in the anchor tags,
and the third value is an integer that represents the headline level.
E.g.,
[['some header lvl3', 'some-header-lvl3', 3], ...]
"""
out_contents = []
headlines = []
for l in lines:
saw_headline = False
orig_len = len(l)
l_stripped = l.lstrip()
if l_stripped.startswith(('# ', '## ', '### ', '#### ', '##### ', '###### ')):
# comply with new markdown standards
# not a headline if '#' not followed by whitespace '##no-header':
if not l.lstrip('#').startswith(' '):
continue
# not a headline if more than 6 '#':
if len(l) - len(l.lstrip('#')) > 6:
continue
# headers can be indented by at most 3 spaces:
if orig_len - len(l_stripped) > 3:
continue
# ignore empty headers
if not set(l) - {'#', ' '}:
continue
saw_headline = True
dashified = dashifyHeadline(l)
if not exclude_h or not dashified[-1] in exclude_h:
if id_tag:
id_tag = '<a class="mk-toclify" id="%s"></a>'\
% (dashified[1])
out_contents.append(id_tag)
headlines.append(dashified)
out_contents.append(l)
if back_links and saw_headline:
out_contents.append('[[back to top](#table-of-contents)]')
return out_contents, headlines
def positioningHeadlines(headlines):
"""
Strips unnecessary whitespaces/tabs if first header is not left-aligned
"""
left_just = False
for row in headlines:
if row[-1] == 1:
left_just = True
break
if not left_just:
for row in headlines:
row[-1] -= 1
return headlines
def createToc(headlines, hyperlink=True, top_link=False, no_toc_header=False):
"""
Creates the table of contents from the headline list
that was returned by the tagAndCollect function.
Keyword Arguments:
headlines: list of lists
e.g., ['Some header lvl3', 'some-header-lvl3', 3]
hyperlink: Creates hyperlinks in Markdown format if True,
e.g., '- [Some header lvl1](#some-header-lvl1)'
top_link: if True, add a id tag for linking the table
of contents itself (for the back-to-top-links)
no_toc_header: suppresses TOC header if True.
Returns a list of headlines for a table of contents
in Markdown format,
e.g., [' - [Some header lvl3](#some-header-lvl3)', ...]
"""
processed = []
if not no_toc_header:
if top_link:
processed.append('<a class="mk-toclify" id="table-of-contents"></a>\n')
processed.append(contentTitle + '<br>')
for line in headlines:
if hyperlink:
item = '[%s](#%s)' % (line[0], line[1])
else:
item = '%s- %s' % ((line[2]-1)*' ', line[0])
processed.append(item + '<br>')
processed.append('\n')
return processed
def buildMarkdown(toc_headlines, body, spacer=0, placeholder=None):
"""
Returns a string with the Markdown output contents incl.
the table of contents.
Keyword arguments:
toc_headlines: lines for the table of contents
as created by the createToc function.
body: contents of the Markdown file including
ID-anchor tags as returned by the
tagAndCollect function.
spacer: Adds vertical space after the table
of contents. Height in pixels.
placeholder: If a placeholder string is provided, the placeholder
will be replaced by the TOC instead of inserting the TOC at
the top of the document
"""
if spacer:
spacer_line = ['\n<div style="height:%spx;"></div>\n' % (spacer)]
toc_markdown = "\n".join(toc_headlines + spacer_line)
else:
toc_markdown = "\n".join(toc_headlines)
if placeholder:
body_markdown = "\n".join(body)
markdown = body_markdown.replace(placeholder, toc_markdown)
else:
body_markdown_p1 = "\n".join(body[:contentLineNdx ]) + '\n'
body_markdown_p2 = "\n".join(body[ contentLineNdx:])
markdown = body_markdown_p1 + toc_markdown + body_markdown_p2
return markdown
def outputMarkdown(markdown_cont, output_file):
"""
Writes to an output file if `outfile` is a valid path.
"""
if output_file:
with open(output_file, 'w') as out:
out.write(markdown_cont)
def markdownToclify(
input_file,
output_file=None,
min_toc_len=2,
github=False,
back_to_top=False,
nolink=False,
no_toc_header=False,
spacer=0,
placeholder=None,
exclude_h=None):
""" Function to add table of contents to markdown files.
Parameters
-----------
input_file: str
Path to the markdown input file.
output_file: str (default: None)
Path to the markdown output file.
min_toc_len: int (default: 2)
Minimum number of entries to create a table of contents for.
github: bool (default: False)
Uses GitHub TOC syntax if True.
back_to_top: bool (default: False)
Inserts back-to-top links below headings if True.
nolink: bool (default: False)
Creates the table of contents without internal links if True.
no_toc_header: bool (default: False)
Suppresses the Table of Contents header if True
spacer: int (default: 0)
Inserts horizontal space (in pixels) after the table of contents.
placeholder: str (default: None)
Inserts the TOC at the placeholder string instead
of inserting the TOC at the top of the document.
exclude_h: list (default None)
Excludes header levels, e.g., if [2, 3], ignores header
levels 2 and 3 in the TOC.
Returns
-----------
changed: Boolean
True if the file has been updated, False otherwise.
"""
cleaned_contents = removeLines(
removeToC(readLines(input_file)),
remove=('[[back to top]', '<a class="mk-toclify"'))
processed_contents, raw_headlines = tagAndCollect(
cleaned_contents,
id_tag=not github,
back_links=back_to_top,
exclude_h=exclude_h)
# add table of contents?
if len(raw_headlines) < min_toc_len:
processed_headlines = []
else:
leftjustified_headlines = positioningHeadlines(raw_headlines)
processed_headlines = createToc(
leftjustified_headlines,
hyperlink=not nolink,
top_link=not nolink and not github,
no_toc_header=no_toc_header)
if nolink:
processed_contents = cleaned_contents
cont = buildMarkdown(
toc_headlines=processed_headlines,
body=processed_contents,
spacer=spacer,
placeholder=placeholder)
if output_file:
outputMarkdown(cont, output_file)
def isReleaseNotes(f):
return os.path.basename(f) == releaseNotesName
def excludeHeadingsFor(f):
return headingExcludeRelease if isReleaseNotes(f) else headingExcludeDefault
def updateSingleDocumentToC(input_file, min_toc_len, verbose=False):
"""Add or update table of contents in specified file. Return 1 if file changed, 0 otherwise."""
if verbose :
print( 'file: {}'.format(input_file))
output_file = input_file + '.tmp'
markdownToclify(
input_file=input_file,
output_file=output_file,
min_toc_len=min_toc_len,
github=True,
back_to_top=False,
nolink=False,
no_toc_header=False,
spacer=False,
placeholder=False,
exclude_h=excludeHeadingsFor(input_file))
# prevent race-condition (Python 3.3):
if sys.version_info >= (3, 3):
os.replace(output_file, input_file)
else:
os.remove(input_file)
os.rename(output_file, input_file)
return 1
def updateDocumentToC(paths, min_toc_len, verbose):
"""Add or update table of contents to specified paths. Return number of changed files"""
n = 0
for g in paths:
for f in glob.glob(g):
if os.path.isfile(f):
n = n + updateSingleDocumentToC(input_file=f, min_toc_len=min_toc_len, verbose=verbose)
return n
def updateDocumentToCMain():
"""Add or update table of contents to specified paths."""
parser = argparse.ArgumentParser(
description='Add or update table of contents in markdown documents.',
epilog="""""",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'Input',
metavar='file',
type=str,
nargs=argparse.REMAINDER,
help='files to process, at default: docs/*.md')
parser.add_argument(
'-v', '--verbose',
action='store_true',
help='report the name of the file being processed')
parser.add_argument(
'--min-toc-entries',
dest='minTocEntries',
default=minTocEntries,
type=int,
metavar='N',
help='the minimum number of entries to create a table of contents for [{default}]'.format(default=minTocEntries))
parser.add_argument(
'--remove-toc',
action='store_const',
dest='minTocEntries',
const=99,
help='remove all tables of contents')
args = parser.parse_args()
paths = args.Input if args.Input else [documentsDefault]
changedFiles = updateDocumentToC(paths=paths, min_toc_len=args.minTocEntries, verbose=args.verbose)
if changedFiles > 0:
print( "Processed table of contents in " + str(changedFiles) + " file(s)" )
else:
print( "No table of contents added or updated" )
if __name__ == '__main__':
updateDocumentToCMain()
# end of file
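
For orientation, a hedged sketch of the TOC block createToc emits (the headings are illustrative):

# Shape of the generated table of contents:
#   **Contents**<br>
#   [Some header lvl2](#some-header-lvl2)<br>
#   [Another header](#another-header)<br>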

tools/scripts/updateWandbox.py (+47)

@@ -0,0 +1,47 @@
#!/usr/bin/env python
import json
import os
import urllib2
from scriptCommon import catchPath
def upload(options):
request = urllib2.Request('http://melpon.org/wandbox/api/compile.json')
request.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(request, json.dumps(options))
return json.loads(response.read())
main_file = '''
#define CATCH_CONFIG_MAIN // This tells Catch to provide a main() - only do this in one cpp file
#include "catch.hpp"
unsigned int Factorial( unsigned int number ) {
return number <= 1 ? number : Factorial(number-1)*number;
}
TEST_CASE( "Factorials are computed", "[factorial]" ) {
REQUIRE( Factorial(1) == 1 );
REQUIRE( Factorial(2) == 2 );
REQUIRE( Factorial(3) == 6 );
REQUIRE( Factorial(10) == 3628800 );
}
'''
def uploadFiles():
response = upload({
'compiler': 'gcc-head',
'code': main_file,
'codes': [{
'file': 'catch.hpp',
'code': open(os.path.join(catchPath, 'single_include', 'catch2', 'catch.hpp')).read()
}],
'options': 'c++11,cpp-no-pedantic,boost-nothing',
'compiler-option-raw': '-DCATCH_CONFIG_FAST_COMPILE',
'save': True
})
if 'url' in response and 'compiler_error' not in response:
return True, response['url']
else:
return False, response
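
A hedged usage sketch; uploadFiles returns a (success, url-or-error) pair:

from updateWandbox import uploadFiles

success, result = uploadFiles()
print(result if success else 'Upload failed: {0}'.format(result))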