2017-01-20 12:28:40 +01:00
|
|
|
#!/usr/bin/env python
|
|
|
|
|
2017-02-15 12:54:47 +01:00
|
|
|
from __future__ import print_function
|
2014-03-08 11:31:38 +01:00
|
|
|
|
2012-11-15 23:15:41 +01:00
|
|
|
import os
|
2018-01-18 16:28:19 +01:00
|
|
|
import io
|
2012-11-15 23:15:41 +01:00
|
|
|
import sys
|
|
|
|
import re
|
|
|
|
import datetime
|
2017-07-09 20:58:51 +02:00
|
|
|
from glob import glob
|
2012-11-15 23:15:41 +01:00
|
|
|
|
2013-04-24 19:58:57 +02:00
|
|
|
from scriptCommon import catchPath
|
2017-08-24 21:59:06 +02:00
|
|
|
|
|
|
|
def generate(v):
    """Generate the single-include catch.hpp by stitching together the
    individual headers under include/.

    v -- a version object providing getVersionString() (see releaseCommon).
    """
    # Matches '#include "..."' directives whose target may be inlined.
    includesParser = re.compile( r'\s*#\s*include\s*"(.*)"' )
    # Include-guard lines are stripped from the merged output.
    guardParser = re.compile( r'\s*#.*(TWOBLUECUBES_)?CATCH_.*_INCLUDED')
    defineParser = re.compile( r'\s*#define\s+(TWOBLUECUBES_)?CATCH_.*_INCLUDED')
    ifParser = re.compile( r'\s*#ifndef (TWOBLUECUBES_)?CATCH_.*_INCLUDED')
    endIfParser = re.compile( r'\s*#endif // (TWOBLUECUBES_)?CATCH_.*_INCLUDED')
    # Marks the start of the implementation-only section.
    ifImplParser = re.compile( r'\s*#ifdef CATCH_CONFIG_RUNNER' )
    # Per-file license comment blocks are dropped (one merged banner is
    # emitted instead).
    commentParser1 = re.compile( r'^\s*/\*')
    commentParser2 = re.compile( r'^ \*')
    blankParser = re.compile( r'^\s*$')

    seenHeaders = set([])
    possibleHeaders = set([])
    rootPath = os.path.join( catchPath, 'include/' )
    outputPath = os.path.join( catchPath, 'single_include/catch2/catch.hpp' )

    # Mutable parsing state shared with the nested helper functions.
    # implIfDefs == -1 means "not currently inside the impl section".
    globals = {
        'includeImpl' : True,
        'ifdefs'      : 0,
        'implIfDefs'  : -1
    }

    for arg in sys.argv[1:]:
        arg = arg.lower()
        if arg == "noimpl":
            globals['includeImpl'] = False
            print( "Not including impl code" )
        else:
            print( "\n** Unrecognised argument: " + arg + " **\n" )
            # was exit(1): the 'exit' name is a site/interactive helper and is
            # not guaranteed to exist; sys.exit is the portable form.
            sys.exit(1)

    # ensure that the output directory exists (hopefully no races)
    outDir = os.path.dirname(outputPath)
    if not os.path.exists(outDir):
        os.makedirs(outDir)

    # Force LF newlines so the generated header is byte-identical on all
    # platforms.
    out = io.open( outputPath, 'w', newline='\n', encoding='utf-8')
|
2017-08-24 21:59:06 +02:00
|
|
|
|
|
|
|
def write( line ):
|
|
|
|
if globals['includeImpl'] or globals['implIfDefs'] == -1:
|
2018-04-19 22:03:25 +02:00
|
|
|
out.write( line )
|
2017-08-24 21:59:06 +02:00
|
|
|
|
2019-11-19 05:22:38 +01:00
|
|
|
def getDirsToSearch( ):
|
|
|
|
return [os.path.join( rootPath, s) for s in ['', 'internal', 'reporters', 'internal/benchmark', 'internal/benchmark/detail']]
|
|
|
|
|
|
|
|
def collectPossibleHeaders():
|
|
|
|
dirs = getDirsToSearch()
|
|
|
|
for dir in dirs:
|
|
|
|
hpps = glob(os.path.join(dir, '*.hpp'))
|
|
|
|
hs = glob(os.path.join(dir, '*.h'))
|
|
|
|
possibleHeaders.update( hpp.rpartition( os.sep )[2] for hpp in hpps )
|
|
|
|
possibleHeaders.update( h.rpartition( os.sep )[2] for h in hs )
|
|
|
|
|
|
|
|
|
2017-08-24 21:59:06 +02:00
|
|
|
def insertCpps():
|
2019-11-19 05:22:38 +01:00
|
|
|
dirs = getDirsToSearch()
|
2017-08-24 21:59:06 +02:00
|
|
|
cppFiles = []
|
|
|
|
for dir in dirs:
|
|
|
|
cppFiles += glob(os.path.join(dir, '*.cpp'))
|
2017-08-30 15:42:23 +02:00
|
|
|
# To minimize random diffs, sort the files before processing them
|
|
|
|
for fname in sorted(cppFiles):
|
2017-08-24 21:59:06 +02:00
|
|
|
dir, name = fname.rsplit(os.path.sep, 1)
|
|
|
|
dir += os.path.sep
|
|
|
|
parseFile(dir, name)
|
|
|
|
|
|
|
|
def parseFile( path, filename ):
|
2018-04-19 22:03:25 +02:00
|
|
|
f = io.open( os.path.join(path, filename), 'r', encoding='utf-8' )
|
2017-08-24 21:59:06 +02:00
|
|
|
blanks = 0
|
2018-04-19 22:03:25 +02:00
|
|
|
write( u"// start {0}\n".format( filename ) )
|
2017-08-24 21:59:06 +02:00
|
|
|
for line in f:
|
|
|
|
if '// ~*~* CATCH_CPP_STITCH_PLACE *~*~' in line:
|
|
|
|
insertCpps()
|
|
|
|
continue
|
|
|
|
elif ifParser.match( line ):
|
|
|
|
globals['ifdefs'] += 1
|
|
|
|
elif endIfParser.match( line ):
|
|
|
|
globals['ifdefs'] -= 1
|
|
|
|
if globals['ifdefs'] == globals['implIfDefs']:
|
|
|
|
globals['implIfDefs'] = -1
|
|
|
|
m = includesParser.match( line )
|
|
|
|
if m:
|
|
|
|
header = m.group(1)
|
|
|
|
headerPath, sep, headerFile = header.rpartition( "/" )
|
2018-04-19 15:02:15 +02:00
|
|
|
if headerFile not in seenHeaders:
|
2017-08-24 21:59:06 +02:00
|
|
|
if headerFile != "tbc_text_format.h" and headerFile != "clara.h":
|
|
|
|
seenHeaders.add( headerFile )
|
|
|
|
if headerPath == "internal" and path.endswith("internal/"):
|
|
|
|
headerPath = ""
|
|
|
|
sep = ""
|
|
|
|
if os.path.exists( path + headerPath + sep + headerFile ):
|
|
|
|
parseFile( path + headerPath + sep, headerFile )
|
|
|
|
else:
|
|
|
|
parseFile( rootPath + headerPath + sep, headerFile )
|
|
|
|
else:
|
|
|
|
if ifImplParser.match(line):
|
|
|
|
globals['implIfDefs'] = globals['ifdefs']
|
|
|
|
if (not guardParser.match( line ) or defineParser.match( line ) ) and not commentParser1.match( line )and not commentParser2.match( line ):
|
|
|
|
if blankParser.match( line ):
|
|
|
|
blanks = blanks + 1
|
|
|
|
else:
|
|
|
|
blanks = 0
|
|
|
|
if blanks < 2 and not defineParser.match(line):
|
|
|
|
write( line.rstrip() + "\n" )
|
2018-04-19 22:03:25 +02:00
|
|
|
write( u'// end {}\n'.format(filename) )
|
|
|
|
|
2019-11-19 05:22:38 +01:00
|
|
|
def warnUnparsedHeaders():
|
|
|
|
unparsedHeaders = possibleHeaders.difference( seenHeaders )
|
|
|
|
# These headers aren't packaged into the unified header, exclude them from any warning
|
|
|
|
whitelist = ['catch.hpp', 'catch_reporter_teamcity.hpp', 'catch_with_main.hpp', 'catch_reporter_automake.hpp', 'catch_reporter_tap.hpp', 'catch_reporter_sonarqube.hpp']
|
|
|
|
unparsedHeaders = unparsedHeaders.difference( whitelist )
|
|
|
|
if unparsedHeaders:
|
|
|
|
print( "WARNING: unparsed headers detected\n{0}\n".format( unparsedHeaders ) )
|
2018-04-19 22:03:25 +02:00
|
|
|
|
|
|
|
write( u"/*\n" )
|
|
|
|
write( u" * Catch v{0}\n".format( v.getVersionString() ) )
|
|
|
|
write( u" * Generated: {0}\n".format( datetime.datetime.now() ) )
|
|
|
|
write( u" * ----------------------------------------------------------\n" )
|
|
|
|
write( u" * This file has been merged from multiple headers. Please don't edit it directly\n" )
|
|
|
|
write( u" * Copyright (c) {} Two Blue Cubes Ltd. All rights reserved.\n".format( datetime.date.today().year ) )
|
|
|
|
write( u" *\n" )
|
|
|
|
write( u" * Distributed under the Boost Software License, Version 1.0. (See accompanying\n" )
|
|
|
|
write( u" * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n" )
|
|
|
|
write( u" */\n" )
|
|
|
|
write( u"#ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED\n" )
|
|
|
|
write( u"#define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED\n" )
|
2017-08-24 21:59:06 +02:00
|
|
|
|
2019-11-19 05:22:38 +01:00
|
|
|
collectPossibleHeaders()
|
2017-08-24 21:59:06 +02:00
|
|
|
parseFile( rootPath, 'catch.hpp' )
|
2019-11-19 05:22:38 +01:00
|
|
|
warnUnparsedHeaders()
|
2017-08-24 21:59:06 +02:00
|
|
|
|
2018-04-19 22:03:25 +02:00
|
|
|
write( u"#endif // TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED\n\n" )
|
2017-08-24 21:59:06 +02:00
|
|
|
out.close()
|
2019-11-19 05:22:38 +01:00
|
|
|
print( "Generated single include for Catch v{0}\n".format( v.getVersionString() ) )
|
2017-08-24 21:59:06 +02:00
|
|
|
|
|
|
|
|
|
|
|
if __name__ == '__main__':
    # Build the single header for the current release version.
    from releaseCommon import Version

    currentVersion = Version()
    generate(currentVersion)
|