A36 PCB1.1 software project cleanup
284
fr3092_mcu/components/modules/littlefs/scripts/code.py
Normal file
@ -0,0 +1,284 @@
#!/usr/bin/env python3
#
# Script to find code size at the function level. Basically just a thin wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#

import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co


OBJ_PATHS = ['*.o']

def collect(paths, **args):
    results = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
    for path in paths:
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                results[(path, m.group('func'))] += int(m.group('size'), 16)
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub('\.o$', '.c', file)
        # discard internal functions
        if not args.get('everything'):
            if func.startswith('__'):
                continue
        # discard .8449 suffixes created by optimizer
        func = re.sub('\.[0-9]+', '', func)

        flat_results.append((file, func, size))

    return flat_results

def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .obj files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['code_size']))
                for result in r
                if result.get('code_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['code_size']))
                    for result in r
                    if result.get('code_size') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('code_size', None)
                        merged_results[(file, func)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, size in results:
            merged_results[(file, func)]['code_size'] = size

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'code_size'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find code size at the function level.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find code sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff code size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level code sizes. Note this does not include padding! "
            "So sizes may differ from other tools.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total code size.")
    parser.add_argument('--type', default='tTrRdD',
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %(default)r.")
    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
        help="Path to the nm tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
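
Note: collect() only parses `nm --size-sort` output. A minimal sketch of the line format the regex expects, assuming GNU nm's `<hex size> <type> <name>` output (the symbol line below is illustrative, not from a real build):

    import re

    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)'
        ' (?P<type>[tTrRdD])'
        ' (?P<func>.+?)$')

    m = pattern.match("0000012c T lfs_dir_commit")  # hypothetical nm line
    assert int(m.group('size'), 16) == 0x12c        # sizes are hex
    assert m.group('type') == 'T'                   # .text symbol
    assert m.group('func') == 'lfs_dir_commit'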
323
fr3092_mcu/components/modules/littlefs/scripts/coverage.py
Normal file
@ -0,0 +1,323 @@
#!/usr/bin/env python3
#
# Parse and report coverage info from .info files generated by lcov
#
import os
import glob
import csv
import re
import collections as co
import bisect as b


INFO_PATHS = ['tests/*.toml.info']

def collect(paths, **args):
    file = None
    funcs = []
    lines = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<file>SF:/?(?P<file_name>.*))$'
        '|^(?P<func>FN:(?P<func_lineno>[0-9]*),(?P<func_name>.*))$'
        '|^(?P<line>DA:(?P<line_lineno>[0-9]*),(?P<line_hits>[0-9]*))$')
    for path in paths:
        with open(path) as f:
            for line in f:
                m = pattern.match(line)
                if m and m.group('file'):
                    file = m.group('file_name')
                elif m and file and m.group('func'):
                    funcs.append((file, int(m.group('func_lineno')),
                        m.group('func_name')))
                elif m and file and m.group('line'):
                    lines[(file, int(m.group('line_lineno')))] += (
                        int(m.group('line_hits')))

    # map line numbers to functions
    funcs.sort()
    def func_from_lineno(file, lineno):
        i = b.bisect(funcs, (file, lineno))
        if i and funcs[i-1][0] == file:
            return funcs[i-1][2]
        else:
            return None

    # reduce to function info
    reduced_funcs = co.defaultdict(lambda: (0, 0))
    for (file, line_lineno), line_hits in lines.items():
        func = func_from_lineno(file, line_lineno)
        if not func:
            continue
        hits, count = reduced_funcs[(file, func)]
        reduced_funcs[(file, func)] = (hits + (line_hits > 0), count + 1)

    results = []
    for (file, func), (hits, count) in reduced_funcs.items():
        # discard internal/testing functions (test_* injected with
        # internal testing)
        if not args.get('everything'):
            if func.startswith('__') or func.startswith('test_'):
                continue
        # discard .8449 suffixes created by optimizer
        func = re.sub('\.[0-9]+', '', func)
        results.append((file, func, hits, count))

    return results


def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find coverage
    if not args.get('use'):
        # find *.info files
        paths = []
        for path in args['info_paths']:
            if os.path.isdir(path):
                path = path + '/*.gcov'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .info files found in %r?' % args['info_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['coverage_hits']),
                    int(result['coverage_count']))
                for result in r
                if result.get('coverage_hits') not in {None, ''}
                if result.get('coverage_count') not in {None, ''}]

    total_hits, total_count = 0, 0
    for _, _, hits, count in results:
        total_hits += hits
        total_count += count

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['coverage_hits']),
                        int(result['coverage_count']))
                    for result in r
                    if result.get('coverage_hits') not in {None, ''}
                    if result.get('coverage_count') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total_hits, prev_total_count = 0, 0
        for _, _, hits, count in prev_results:
            prev_total_hits += hits
            prev_total_count += count

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('coverage_hits', None)
                        result.pop('coverage_count', None)
                        merged_results[(file, func)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, hits, count in results:
            merged_results[(file, func)]['coverage_hits'] = hits
            merged_results[(file, func)]['coverage_count'] = count

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name',
                *other_fields, 'coverage_hits', 'coverage_count'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: (0, 0))
        for file, func, hits, count in results:
            entry = (file if by == 'file' else func)
            entry_hits, entry_count = entries[entry]
            entries[entry] = (entry_hits + hits, entry_count + count)
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0, 0, 0, 0))
        for name, (new_hits, new_count) in news.items():
            diff[name] = (
                0, 0,
                new_hits, new_count,
                new_hits, new_count,
                (new_hits/new_count if new_count else 1.0) - 1.0)
        for name, (old_hits, old_count) in olds.items():
            _, _, new_hits, new_count, _, _, _ = diff[name]
            diff[name] = (
                old_hits, old_count,
                new_hits, new_count,
                new_hits-old_hits, new_count-old_count,
                ((new_hits/new_count if new_count else 1.0)
                    - (old_hits/old_count if old_count else 1.0)))
        return diff

    def sorted_entries(entries):
        if args.get('coverage_sort'):
            return sorted(entries, key=lambda x: (-(x[1][0]/x[1][1] if x[1][1] else -1), x))
        elif args.get('reverse_coverage_sort'):
            return sorted(entries, key=lambda x: (+(x[1][0]/x[1][1] if x[1][1] else -1), x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('coverage_sort'):
            return sorted(entries, key=lambda x: (-(x[1][2]/x[1][3] if x[1][3] else -1), x))
        elif args.get('reverse_coverage_sort'):
            return sorted(entries, key=lambda x: (+(x[1][2]/x[1][3] if x[1][3] else -1), x))
        else:
            return sorted(entries, key=lambda x: (-x[1][6], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %19s' % (by, 'hits/line'))
        else:
            print('%-36s %19s %19s %11s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, hits, count):
        print("%-36s %11s %7s" % (name,
            '%d/%d' % (hits, count)
                if count else '-',
            '%.1f%%' % (100*hits/count)
                if count else '-'))

    def print_diff_entry(name,
            old_hits, old_count,
            new_hits, new_count,
            diff_hits, diff_count,
            ratio):
        print("%-36s %11s %7s %11s %7s %11s%s" % (name,
            '%d/%d' % (old_hits, old_count)
                if old_count else '-',
            '%.1f%%' % (100*old_hits/old_count)
                if old_count else '-',
            '%d/%d' % (new_hits, new_count)
                if new_count else '-',
            '%.1f%%' % (100*new_hits/new_count)
                if new_count else '-',
            '%+d/%+d' % (diff_hits, diff_count),
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, (hits, count) in sorted_entries(entries.items()):
                print_entry(name, hits, count)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _ in diff.values() if not old),
                sum(1 for _, _, _, new, _, _, _ in diff.values() if not new)))
            for name, (
                    old_hits, old_count,
                    new_hits, new_count,
                    diff_hits, diff_count, ratio) in sorted_diff_entries(
                        diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name,
                        old_hits, old_count,
                        new_hits, new_count,
                        diff_hits, diff_count,
                        ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total_hits, total_count)
        else:
            ratio = ((total_hits/total_count
                    if total_count else 1.0)
                - (prev_total_hits/prev_total_count
                    if prev_total_count else 1.0))
            print_diff_entry('TOTAL',
                prev_total_hits, prev_total_count,
                total_hits, total_count,
                total_hits-prev_total_hits, total_count-prev_total_count,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Parse and report coverage info from .info files \
            generated by lcov")
    parser.add_argument('info_paths', nargs='*', default=INFO_PATHS,
        help="Description of where to find *.info files. May be a directory \
            or list of paths. *.info files will be merged to show the total \
            coverage. Defaults to %r." % INFO_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't do any work, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff code size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--coverage-sort', action='store_true',
        help="Sort by coverage.")
    parser.add_argument('-S', '--reverse-coverage-sort', action='store_true',
        help="Sort by coverage, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level coverage.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total coverage.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
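
Note: collect() walks the standard lcov record types: SF: opens a source file, FN:<lineno>,<name> declares a function, DA:<lineno>,<hits> counts executions per line. A sketch with made-up names, assuming typical lcov output:

    # illustrative lcov records; collect() matches these with the
    # SF:/FN:/DA: regex above
    info = (
        "SF:/littlefs/lfs.c\n"
        "FN:42,lfs_mount\n"
        "DA:43,10\n"
        "DA:44,0\n")
    # func_from_lineno() bisects on the sorted FN entries, so both DA
    # lines fold into lfs_mount as (hits=1, count=2): one executed
    # line out of two.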
283
fr3092_mcu/components/modules/littlefs/scripts/data.py
Normal file
@ -0,0 +1,283 @@
#!/usr/bin/env python3
#
# Script to find data size at the function level. Basically just a thin wrapper
# around nm with some extra conveniences for comparing builds. Heavily inspired
# by Linux's Bloat-O-Meter.
#

import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co


OBJ_PATHS = ['*.o']

def collect(paths, **args):
    results = co.defaultdict(lambda: 0)
    pattern = re.compile(
        '^(?P<size>[0-9a-fA-F]+)' +
        ' (?P<type>[%s])' % re.escape(args['type']) +
        ' (?P<func>.+?)$')
    for path in paths:
        # note nm-tool may contain extra args
        cmd = args['nm_tool'] + ['--size-sort', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            m = pattern.match(line)
            if m:
                results[(path, m.group('func'))] += int(m.group('size'), 16)
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

    flat_results = []
    for (file, func), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub('\.o$', '.c', file)
        # discard internal functions
        if not args.get('everything'):
            if func.startswith('__'):
                continue
        # discard .8449 suffixes created by optimizer
        func = re.sub('\.[0-9]+', '', func)
        flat_results.append((file, func, size))

    return flat_results

def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .obj files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['data_size']))
                for result in r
                if result.get('data_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['data_size']))
                    for result in r
                    if result.get('data_size') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('data_size', None)
                        merged_results[(file, func)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, size in results:
            merged_results[(file, func)]['data_size'] = size

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'data_size'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: 0)
        for file, func, size in results:
            entry = (file if by == 'file' else func)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find data size at the function level.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find data sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff data size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level data sizes. Note this does not include padding! "
            "So sizes may differ from other tools.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total data size.")
    parser.add_argument('--type', default='dDbB',
        help="Type of symbols to report, this uses the same single-character "
            "type-names emitted by nm. Defaults to %(default)r.")
    parser.add_argument('--nm-tool', default=['nm'], type=lambda x: x.split(),
        help="Path to the nm tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
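
Note: data.py is code.py's twin pointed at data symbols (nm types dDbB instead of tTrRdD). A hypothetical merge workflow, using only the flags defined above, collects both metrics into one CSV keyed by (file, name):

    ./scripts/code.py build/ -o sizes.csv
    ./scripts/data.py build/ -m sizes.csv -o sizes.csv

The -m pass re-reads sizes.csv, keeps the existing code_size column as an extra field, and writes data_size alongside it.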
383
fr3092_mcu/components/modules/littlefs/scripts/explode_asserts.py
Normal file
@ -0,0 +1,383 @@
#!/usr/bin/env python3

import re
import sys

PATTERN = ['LFS_ASSERT', 'assert']
PREFIX = 'LFS'
MAXWIDTH = 16

ASSERT = "__{PREFIX}_ASSERT_{TYPE}_{COMP}"
FAIL = """
__attribute__((unused))
static void __{prefix}_assert_fail_{type}(
        const char *file, int line, const char *comp,
        {ctype} lh, size_t lsize,
        {ctype} rh, size_t rsize) {{
    printf("%s:%d:assert: assert failed with ", file, line);
    __{prefix}_assert_print_{type}(lh, lsize);
    printf(", expected %s ", comp);
    __{prefix}_assert_print_{type}(rh, rsize);
    printf("\\n");
    fflush(NULL);
    raise(SIGABRT);
}}
"""

COMP = {
    '==': 'eq',
    '!=': 'ne',
    '<=': 'le',
    '>=': 'ge',
    '<': 'lt',
    '>': 'gt',
}

TYPE = {
    'int': {
        'ctype': 'intmax_t',
        'fail': FAIL,
        'print': """
            __attribute__((unused))
            static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
                (void)size;
                printf("%"PRIiMAX, v);
            }}
        """,
        'assert': """
            #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
            do {{
                __typeof__(lh) _lh = lh;
                __typeof__(lh) _rh = (__typeof__(lh))rh;
                if (!(_lh {op} _rh)) {{
                    __{prefix}_assert_fail_{type}(file, line, "{comp}",
                        (intmax_t)_lh, 0, (intmax_t)_rh, 0);
                }}
            }} while (0)
        """
    },
    'bool': {
        'ctype': 'bool',
        'fail': FAIL,
        'print': """
            __attribute__((unused))
            static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
                (void)size;
                printf("%s", v ? "true" : "false");
            }}
        """,
        'assert': """
            #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
            do {{
                bool _lh = !!(lh);
                bool _rh = !!(rh);
                if (!(_lh {op} _rh)) {{
                    __{prefix}_assert_fail_{type}(file, line, "{comp}",
                        _lh, 0, _rh, 0);
                }}
            }} while (0)
        """
    },
    'mem': {
        'ctype': 'const void *',
        'fail': FAIL,
        'print': """
            __attribute__((unused))
            static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
                const uint8_t *s = v;
                printf("\\\"");
                for (size_t i = 0; i < size && i < {maxwidth}; i++) {{
                    if (s[i] >= ' ' && s[i] <= '~') {{
                        printf("%c", s[i]);
                    }} else {{
                        printf("\\\\x%02x", s[i]);
                    }}
                }}
                if (size > {maxwidth}) {{
                    printf("...");
                }}
                printf("\\\"");
            }}
        """,
        'assert': """
            #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh, size)
            do {{
                const void *_lh = lh;
                const void *_rh = rh;
                if (!(memcmp(_lh, _rh, size) {op} 0)) {{
                    __{prefix}_assert_fail_{type}(file, line, "{comp}",
                        _lh, size, _rh, size);
                }}
            }} while (0)
        """
    },
    'str': {
        'ctype': 'const char *',
        'fail': FAIL,
        'print': """
            __attribute__((unused))
            static void __{prefix}_assert_print_{type}({ctype} v, size_t size) {{
                __{prefix}_assert_print_mem(v, size);
            }}
        """,
        'assert': """
            #define __{PREFIX}_ASSERT_{TYPE}_{COMP}(file, line, lh, rh)
            do {{
                const char *_lh = lh;
                const char *_rh = rh;
                if (!(strcmp(_lh, _rh) {op} 0)) {{
                    __{prefix}_assert_fail_{type}(file, line, "{comp}",
                        _lh, strlen(_lh), _rh, strlen(_rh));
                }}
            }} while (0)
        """
    }
}

def mkdecls(outf, maxwidth=16):
    outf.write("#include <stdio.h>\n")
    outf.write("#include <stdbool.h>\n")
    outf.write("#include <stdint.h>\n")
    outf.write("#include <inttypes.h>\n")
    outf.write("#include <signal.h>\n")

    for type, desc in sorted(TYPE.items()):
        format = {
            'type': type.lower(), 'TYPE': type.upper(),
            'ctype': desc['ctype'],
            'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
            'maxwidth': maxwidth,
        }
        outf.write(re.sub('\s+', ' ',
            desc['print'].strip().format(**format))+'\n')
        outf.write(re.sub('\s+', ' ',
            desc['fail'].strip().format(**format))+'\n')

        for op, comp in sorted(COMP.items()):
            format.update({
                'comp': comp.lower(), 'COMP': comp.upper(),
                'op': op,
            })
            outf.write(re.sub('\s+', ' ',
                desc['assert'].strip().format(**format))+'\n')

def mkassert(type, comp, lh, rh, size=None):
    format = {
        'type': type.lower(), 'TYPE': type.upper(),
        'comp': comp.lower(), 'COMP': comp.upper(),
        'prefix': PREFIX.lower(), 'PREFIX': PREFIX.upper(),
        'lh': lh.strip(' '),
        'rh': rh.strip(' '),
        'size': size,
    }
    if size:
        return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh}, {size})')
            .format(**format))
    else:
        return ((ASSERT + '(__FILE__, __LINE__, {lh}, {rh})')
            .format(**format))


# simple recursive descent parser
LEX = {
    'ws':       [r'(?:\s|\n|#.*?\n|//.*?\n|/\*.*?\*/)+'],
    'assert':   PATTERN,
    'string':   [r'"(?:\\.|[^"])*"', r"'(?:\\.|[^'])\'"],
    'arrow':    ['=>'],
    'paren':    ['\(', '\)'],
    'op':       ['strcmp', 'memcmp', '->'],
    'comp':     ['==', '!=', '<=', '>=', '<', '>'],
    'logic':    ['\&\&', '\|\|'],
    'sep':      [':', ';', '\{', '\}', ','],
}

class ParseFailure(Exception):
    def __init__(self, expected, found):
        self.expected = expected
        self.found = found

    def __str__(self):
        return "expected %r, found %s..." % (
            self.expected, repr(self.found)[:70])

class Parse:
    def __init__(self, inf, lexemes):
        p = '|'.join('(?P<%s>%s)' % (n, '|'.join(l))
            for n, l in lexemes.items())
        p = re.compile(p, re.DOTALL)
        data = inf.read()
        tokens = []
        while True:
            m = p.search(data)
            if m:
                if m.start() > 0:
                    tokens.append((None, data[:m.start()]))
                tokens.append((m.lastgroup, m.group()))
                data = data[m.end():]
            else:
                tokens.append((None, data))
                break
        self.tokens = tokens
        self.off = 0

    def lookahead(self, *pattern):
        if self.off < len(self.tokens):
            token = self.tokens[self.off]
            if token[0] in pattern or token[1] in pattern:
                self.m = token[1]
                return self.m
        self.m = None
        return self.m

    def accept(self, *patterns):
        m = self.lookahead(*patterns)
        if m is not None:
            self.off += 1
        return m

    def expect(self, *patterns):
        m = self.accept(*patterns)
        if not m:
            raise ParseFailure(patterns, self.tokens[self.off:])
        return m

    def push(self):
        return self.off

    def pop(self, state):
        self.off = state

def passert(p):
    def pastr(p):
        p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
        p.expect('strcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
        lh = pexpr(p) ; p.accept('ws')
        p.expect(',') ; p.accept('ws')
        rh = pexpr(p) ; p.accept('ws')
        p.expect(')') ; p.accept('ws')
        comp = p.expect('comp') ; p.accept('ws')
        p.expect('0') ; p.accept('ws')
        p.expect(')')
        return mkassert('str', COMP[comp], lh, rh)

    def pamem(p):
        p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
        p.expect('memcmp') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
        lh = pexpr(p) ; p.accept('ws')
        p.expect(',') ; p.accept('ws')
        rh = pexpr(p) ; p.accept('ws')
        p.expect(',') ; p.accept('ws')
        size = pexpr(p) ; p.accept('ws')
        p.expect(')') ; p.accept('ws')
        comp = p.expect('comp') ; p.accept('ws')
        p.expect('0') ; p.accept('ws')
        p.expect(')')
        return mkassert('mem', COMP[comp], lh, rh, size)

    def paint(p):
        p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
        lh = pexpr(p) ; p.accept('ws')
        comp = p.expect('comp') ; p.accept('ws')
        rh = pexpr(p) ; p.accept('ws')
        p.expect(')')
        return mkassert('int', COMP[comp], lh, rh)

    def pabool(p):
        p.expect('assert') ; p.accept('ws') ; p.expect('(') ; p.accept('ws')
        lh = pexprs(p) ; p.accept('ws')
        p.expect(')')
        return mkassert('bool', 'eq', lh, 'true')

    def pa(p):
        return p.expect('assert')

    state = p.push()
    lastf = None
    for pa in [pastr, pamem, paint, pabool, pa]:
        try:
            return pa(p)
        except ParseFailure as f:
            p.pop(state)
            lastf = f
    else:
        raise lastf

def pexpr(p):
    res = []
    while True:
        if p.accept('('):
            res.append(p.m)
            while True:
                res.append(pexprs(p))
                if p.accept('sep'):
                    res.append(p.m)
                else:
                    break
            res.append(p.expect(')'))
        elif p.lookahead('assert'):
            res.append(passert(p))
        elif p.accept('assert', 'ws', 'string', 'op', None):
            res.append(p.m)
        else:
            return ''.join(res)

def pexprs(p):
    res = []
    while True:
        res.append(pexpr(p))
        if p.accept('comp', 'logic', ','):
            res.append(p.m)
        else:
            return ''.join(res)

def pstmt(p):
    ws = p.accept('ws') or ''
    lh = pexprs(p)
    if p.accept('=>'):
        rh = pexprs(p)
        return ws + mkassert('int', 'eq', lh, rh)
    else:
        return ws + lh


def main(args):
    inf = open(args.input, 'r') if args.input else sys.stdin
    outf = open(args.output, 'w') if args.output else sys.stdout

    lexemes = LEX.copy()
    if args.pattern:
        lexemes['assert'] = args.pattern
    p = Parse(inf, lexemes)

    # write extra verbose asserts
    mkdecls(outf, maxwidth=args.maxwidth)
    if args.input:
        outf.write("#line %d \"%s\"\n" % (1, args.input))

    # parse and write out stmt at a time
    try:
        while True:
            outf.write(pstmt(p))
            if p.accept('sep'):
                outf.write(p.m)
            else:
                break
    except ParseFailure as f:
        pass

    for i in range(p.off, len(p.tokens)):
        outf.write(p.tokens[i][1])

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(
        description="Cpp step that increases assert verbosity")
    parser.add_argument('input', nargs='?',
        help="Input C file after cpp.")
    parser.add_argument('-o', '--output', required=True,
        help="Output C file.")
    parser.add_argument('-p', '--pattern', action='append',
        help="Patterns to search for starting an assert statement.")
    parser.add_argument('--maxwidth', default=MAXWIDTH, type=int,
        help="Maximum number of characters to display for strcmp and memcmp.")
    main(parser.parse_args())
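
Note: a quick sanity check of the rewrite mkassert() performs, derived from the ASSERT template above (importing the script is assumed to work, e.g. with scripts/ on sys.path):

    from explode_asserts import mkassert

    assert mkassert('int', 'eq', 'a', 'b') == \
        '__LFS_ASSERT_INT_EQ(__FILE__, __LINE__, a, b)'
    assert mkassert('mem', 'eq', 'buf', '"abc"', '3') == \
        '__LFS_ASSERT_MEM_EQ(__FILE__, __LINE__, buf, "abc", 3)'

mkdecls() emits the matching __LFS_ASSERT_*_* macro definitions at the top of the output, so the rewritten file still compiles standalone.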
61
fr3092_mcu/components/modules/littlefs/scripts/prefix.py
Normal file
@ -0,0 +1,61 @@
#!/usr/bin/env python2

# This script replaces prefixes of files, and symbols in that file.
# Useful for creating different versions of the codebase that don't
# conflict at compile time.
#
# example:
# $ ./scripts/prefix.py lfs2

import os
import os.path
import re
import glob
import itertools
import tempfile
import shutil
import subprocess

DEFAULT_PREFIX = "lfs"

def subn(from_prefix, to_prefix, name):
    name, count1 = re.subn('\\b'+from_prefix, to_prefix, name)
    name, count2 = re.subn('\\b'+from_prefix.upper(), to_prefix.upper(), name)
    name, count3 = re.subn('\\B-D'+from_prefix.upper(),
        '-D'+to_prefix.upper(), name)
    return name, count1+count2+count3

def main(from_prefix, to_prefix=None, files=None):
    if not to_prefix:
        from_prefix, to_prefix = DEFAULT_PREFIX, from_prefix

    if not files:
        files = subprocess.check_output([
            'git', 'ls-tree', '-r', '--name-only', 'HEAD']).split()

    for oldname in files:
        # Rename any matching file names
        newname, namecount = subn(from_prefix, to_prefix, oldname)
        if namecount:
            subprocess.check_call(['git', 'mv', oldname, newname])

        # Rename any prefixes in file
        count = 0
        with open(newname+'~', 'w') as tempf:
            with open(newname) as newf:
                for line in newf:
                    line, n = subn(from_prefix, to_prefix, line)
                    count += n
                    tempf.write(line)
        shutil.copystat(newname, newname+'~')
        os.rename(newname+'~', newname)
        subprocess.check_call(['git', 'add', newname])

        # Summary
        print '%s: %d replacements' % (
            '%s -> %s' % (oldname, newname) if namecount else oldname,
            count)

if __name__ == "__main__":
    import sys
    sys.exit(main(*sys.argv[1:]))
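
Note: the word-boundary regexes in subn() keep replacements from touching substrings mid-identifier. A couple of illustrative calls (Python 2, like the script itself):

    >>> subn('lfs', 'lfs2', 'lfs_mount(&lfs, &cfg);')
    ('lfs2_mount(&lfs2, &cfg);', 2)
    >>> subn('lfs', 'lfs2', 'LFS_TYPE_REG')
    ('LFS2_TYPE_REG', 1)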
26
fr3092_mcu/components/modules/littlefs/scripts/readblock.py
Normal file
@ -0,0 +1,26 @@
#!/usr/bin/env python3

import subprocess as sp

def main(args):
    with open(args.disk, 'rb') as f:
        f.seek(args.block * args.block_size)
        block = (f.read(args.block_size)
            .ljust(args.block_size, b'\xff'))

    # what did you expect?
    print("%-8s %-s" % ('off', 'data'))
    return sp.run(['xxd', '-g1', '-'], input=block).returncode

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Hex dump a specific block in a disk.")
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block', type=lambda x: int(x, 0),
        help="Address of block to dump.")
    sys.exit(main(parser.parse_args()))
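
Note: a hypothetical invocation, with arguments as defined by the parser above (int(x, 0) accepts hex or decimal):

    ./scripts/readblock.py disk.img 4096 0x2

This seeks to byte offset 0x2000, pads the block out with 0xff, and pipes it through xxd.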
367
fr3092_mcu/components/modules/littlefs/scripts/readmdir.py
Normal file
@ -0,0 +1,367 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import struct
|
||||
import binascii
|
||||
import sys
|
||||
import itertools as it
|
||||
|
||||
TAG_TYPES = {
|
||||
'splice': (0x700, 0x400),
|
||||
'create': (0x7ff, 0x401),
|
||||
'delete': (0x7ff, 0x4ff),
|
||||
'name': (0x700, 0x000),
|
||||
'reg': (0x7ff, 0x001),
|
||||
'dir': (0x7ff, 0x002),
|
||||
'superblock': (0x7ff, 0x0ff),
|
||||
'struct': (0x700, 0x200),
|
||||
'dirstruct': (0x7ff, 0x200),
|
||||
'ctzstruct': (0x7ff, 0x202),
|
||||
'inlinestruct': (0x7ff, 0x201),
|
||||
'userattr': (0x700, 0x300),
|
||||
'tail': (0x700, 0x600),
|
||||
'softtail': (0x7ff, 0x600),
|
||||
'hardtail': (0x7ff, 0x601),
|
||||
'gstate': (0x700, 0x700),
|
||||
'movestate': (0x7ff, 0x7ff),
|
||||
'crc': (0x700, 0x500),
|
||||
}
|
||||
|
||||
class Tag:
|
||||
def __init__(self, *args):
|
||||
if len(args) == 1:
|
||||
self.tag = args[0]
|
||||
elif len(args) == 3:
|
||||
if isinstance(args[0], str):
|
||||
type = TAG_TYPES[args[0]][1]
|
||||
else:
|
||||
type = args[0]
|
||||
|
||||
if isinstance(args[1], str):
|
||||
id = int(args[1], 0) if args[1] not in 'x.' else 0x3ff
|
||||
else:
|
||||
id = args[1]
|
||||
|
||||
if isinstance(args[2], str):
|
||||
size = int(args[2], str) if args[2] not in 'x.' else 0x3ff
|
||||
else:
|
||||
size = args[2]
|
||||
|
||||
self.tag = (type << 20) | (id << 10) | size
|
||||
else:
|
||||
assert False
|
||||
|
||||
@property
|
||||
def isvalid(self):
|
||||
return not bool(self.tag & 0x80000000)
|
||||
|
||||
@property
|
||||
def isattr(self):
|
||||
return not bool(self.tag & 0x40000000)
|
||||
|
||||
@property
|
||||
def iscompactable(self):
|
||||
return bool(self.tag & 0x20000000)
|
||||
|
||||
@property
|
||||
def isunique(self):
|
||||
return not bool(self.tag & 0x10000000)
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return (self.tag & 0x7ff00000) >> 20
|
||||
|
||||
@property
|
||||
def type1(self):
|
||||
return (self.tag & 0x70000000) >> 20
|
||||
|
||||
@property
|
||||
def type3(self):
|
||||
return (self.tag & 0x7ff00000) >> 20
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return (self.tag & 0x000ffc00) >> 10
|
||||
|
||||
@property
|
||||
def size(self):
|
||||
return (self.tag & 0x000003ff) >> 0
|
||||
|
||||
@property
|
||||
def dsize(self):
|
||||
return 4 + (self.size if self.size != 0x3ff else 0)
|
||||
|
||||
@property
|
||||
def chunk(self):
|
||||
return self.type & 0xff
|
||||
|
||||
@property
|
||||
def schunk(self):
|
||||
return struct.unpack('b', struct.pack('B', self.chunk))[0]
|
||||
|
||||
def is_(self, type):
|
||||
return (self.type & TAG_TYPES[type][0]) == TAG_TYPES[type][1]
|
||||
|
||||
def mkmask(self):
|
||||
return Tag(
|
||||
0x700 if self.isunique else 0x7ff,
|
||||
0x3ff if self.isattr else 0,
|
||||
0)
|
||||
|
||||
def chid(self, nid):
|
||||
ntag = Tag(self.type, nid, self.size)
|
||||
if hasattr(self, 'off'): ntag.off = self.off
|
||||
if hasattr(self, 'data'): ntag.data = self.data
|
||||
if hasattr(self, 'crc'): ntag.crc = self.crc
|
||||
return ntag
|
||||
|
||||
def typerepr(self):
|
||||
if self.is_('crc') and getattr(self, 'crc', 0xffffffff) != 0xffffffff:
|
||||
return 'crc (bad)'
|
||||
|
||||
reverse_types = {v: k for k, v in TAG_TYPES.items()}
|
||||
for prefix in range(12):
|
||||
mask = 0x7ff & ~((1 << prefix)-1)
|
||||
if (mask, self.type & mask) in reverse_types:
|
||||
type = reverse_types[mask, self.type & mask]
|
||||
if prefix > 0:
|
||||
return '%s %#0*x' % (
|
||||
type, prefix//4, self.type & ((1 << prefix)-1))
|
||||
else:
|
||||
return type
|
||||
else:
|
||||
return '%02x' % self.type
|
||||
|
||||
def idrepr(self):
|
||||
return repr(self.id) if self.id != 0x3ff else '.'
|
||||
|
||||
def sizerepr(self):
|
||||
return repr(self.size) if self.size != 0x3ff else 'x'
|
||||
|
||||
def __repr__(self):
|
||||
return 'Tag(%r, %d, %d)' % (self.typerepr(), self.id, self.size)
|
||||
|
||||
def __lt__(self, other):
|
||||
return (self.id, self.type) < (other.id, other.type)
|
||||
|
||||
def __bool__(self):
|
||||
return self.isvalid
|
||||
|
||||
def __int__(self):
|
||||
return self.tag
|
||||
|
||||
def __index__(self):
|
||||
return self.tag
|
||||
|
||||
class MetadataPair:
|
||||
def __init__(self, blocks):
|
||||
if len(blocks) > 1:
|
||||
self.pair = [MetadataPair([block]) for block in blocks]
|
||||
self.pair = sorted(self.pair, reverse=True)
|
||||
|
||||
self.data = self.pair[0].data
|
||||
self.rev = self.pair[0].rev
|
||||
self.tags = self.pair[0].tags
|
||||
self.ids = self.pair[0].ids
|
||||
self.log = self.pair[0].log
|
||||
self.all_ = self.pair[0].all_
|
||||
return
|
||||
|
||||
self.pair = [self]
|
||||
self.data = blocks[0]
|
||||
block = self.data
|
||||
|
||||
self.rev, = struct.unpack('<I', block[0:4])
|
||||
crc = binascii.crc32(block[0:4])
|
||||
|
||||
# parse tags
|
||||
corrupt = False
|
||||
tag = Tag(0xffffffff)
|
||||
off = 4
|
||||
self.log = []
|
||||
self.all_ = []
|
||||
while len(block) - off >= 4:
|
||||
ntag, = struct.unpack('>I', block[off:off+4])
|
||||
|
||||
tag = Tag(int(tag) ^ ntag)
|
||||
tag.off = off + 4
|
||||
tag.data = block[off+4:off+tag.dsize]
|
||||
if tag.is_('crc'):
|
||||
crc = binascii.crc32(block[off:off+4+4], crc)
|
||||
else:
|
||||
crc = binascii.crc32(block[off:off+tag.dsize], crc)
|
||||
tag.crc = crc
|
||||
off += tag.dsize
|
||||
|
||||
self.all_.append(tag)
|
||||
|
||||
if tag.is_('crc'):
|
||||
# is valid commit?
|
||||
if crc != 0xffffffff:
|
||||
corrupt = True
|
||||
if not corrupt:
|
||||
self.log = self.all_.copy()
|
||||
|
||||
# reset tag parsing
|
||||
crc = 0
|
||||
tag = Tag(int(tag) ^ ((tag.type & 1) << 31))
|
||||
|
||||
# find active ids
|
||||
self.ids = list(it.takewhile(
|
||||
lambda id: Tag('name', id, 0) in self,
|
||||
it.count()))
|
||||
|
||||
# find most recent tags
|
||||
self.tags = []
|
||||
for tag in self.log:
|
||||
if tag.is_('crc') or tag.is_('splice'):
|
||||
continue
|
||||
elif tag.id == 0x3ff:
|
||||
if tag in self and self[tag] is tag:
|
||||
self.tags.append(tag)
|
||||
else:
|
||||
# id could have change, I know this is messy and slow
|
||||
# but it works
|
||||
for id in self.ids:
|
||||
ntag = tag.chid(id)
|
||||
if ntag in self and self[ntag] is tag:
|
||||
self.tags.append(ntag)
|
||||
|
||||
self.tags = sorted(self.tags)
|
||||
|
||||
def __bool__(self):
|
||||
return bool(self.log)
|
||||
|
||||
def __lt__(self, other):
|
||||
# corrupt blocks don't count
|
||||
if not self or not other:
|
||||
return bool(other)
|
||||
|
||||
# use sequence arithmetic to avoid overflow
|
||||
return not ((other.rev - self.rev) & 0x80000000)
|
||||
|
||||
def __contains__(self, args):
|
||||
try:
|
||||
self[args]
|
||||
return True
|
||||
except KeyError:
|
||||
return False
|
||||
|
||||
def __getitem__(self, args):
|
||||
if isinstance(args, tuple):
|
||||
gmask, gtag = args
|
||||
else:
|
||||
gmask, gtag = args.mkmask(), args
|
||||
|
||||
gdiff = 0
|
||||
for tag in reversed(self.log):
|
||||
if (gmask.id != 0 and tag.is_('splice') and
|
||||
tag.id <= gtag.id - gdiff):
|
||||
if tag.is_('create') and tag.id == gtag.id - gdiff:
|
||||
# creation point
|
||||
break
|
||||
|
||||
gdiff += tag.schunk
|
||||
|
||||
if ((int(gmask) & int(tag)) ==
|
||||
(int(gmask) & int(gtag.chid(gtag.id - gdiff)))):
|
||||
if tag.size == 0x3ff:
|
||||
# deleted
|
||||
break
|
||||
|
||||
return tag
|
||||
|
||||
raise KeyError(gmask, gtag)
|
||||
|
||||
def _dump_tags(self, tags, f=sys.stdout, truncate=True):
|
||||
f.write("%-8s %-8s %-13s %4s %4s" % (
|
||||
'off', 'tag', 'type', 'id', 'len'))
|
||||
if truncate:
|
||||
f.write(' data (truncated)')
|
||||
f.write('\n')
|
||||
|
||||
for tag in tags:
|
||||
f.write("%08x: %08x %-13s %4s %4s" % (
|
||||
tag.off, tag,
|
||||
tag.typerepr(), tag.idrepr(), tag.sizerepr()))
|
||||
if truncate:
|
||||
f.write(" %-23s %-8s\n" % (
|
||||
' '.join('%02x' % c for c in tag.data[:8]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, tag.data[:8]))))
|
||||
else:
|
||||
f.write("\n")
|
||||
for i in range(0, len(tag.data), 16):
|
||||
f.write(" %08x: %-47s %-16s\n" % (
|
||||
tag.off+i,
|
||||
' '.join('%02x' % c for c in tag.data[i:i+16]),
|
||||
''.join(c if c >= ' ' and c <= '~' else '.'
|
||||
for c in map(chr, tag.data[i:i+16]))))
|
||||
|
||||
def dump_tags(self, f=sys.stdout, truncate=True):
|
||||
self._dump_tags(self.tags, f=f, truncate=truncate)
|
||||
|
||||
def dump_log(self, f=sys.stdout, truncate=True):
|
||||
self._dump_tags(self.log, f=f, truncate=truncate)
|
||||
|
||||
def dump_all(self, f=sys.stdout, truncate=True):
|
||||
self._dump_tags(self.all_, f=f, truncate=truncate)
|
||||
|
||||
def main(args):
|
||||
blocks = []
|
||||
with open(args.disk, 'rb') as f:
|
||||
for block in [args.block1, args.block2]:
|
||||
if block is None:
|
||||
continue
|
||||
f.seek(block * args.block_size)
|
||||
blocks.append(f.read(args.block_size)
|
||||
.ljust(args.block_size, b'\xff'))
|
||||
|
||||
# find most recent pair
|
||||
mdir = MetadataPair(blocks)
|
||||
|
||||
try:
|
||||
mdir.tail = mdir[Tag('tail', 0, 0)]
|
||||
if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
|
||||
mdir.tail = None
|
||||
except KeyError:
|
||||
mdir.tail = None
|
||||
|
||||
print("mdir {%s} rev %d%s%s%s" % (
|
||||
', '.join('%#x' % b
|
||||
for b in [args.block1, args.block2]
|
||||
if b is not None),
|
||||
mdir.rev,
|
||||
' (was %s)' % ', '.join('%d' % m.rev for m in mdir.pair[1:])
|
||||
if len(mdir.pair) > 1 else '',
|
||||
' (corrupted!)' if not mdir else '',
|
||||
' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
|
||||
if mdir.tail else ''))
|
||||
if args.all:
|
||||
mdir.dump_all(truncate=not args.no_truncate)
|
||||
elif args.log:
|
||||
mdir.dump_log(truncate=not args.no_truncate)
|
||||
else:
|
||||
mdir.dump_tags(truncate=not args.no_truncate)
|
||||
|
||||
return 0 if mdir else 1
|
||||
|
||||

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Dump useful info about metadata pairs in littlefs.")
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block1', type=lambda x: int(x, 0),
        help="First block address for finding the metadata pair.")
    parser.add_argument('block2', nargs='?', type=lambda x: int(x, 0),
        help="Second block address for finding the metadata pair.")
    parser.add_argument('-l', '--log', action='store_true',
        help="Show tags in log.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all tags in log, including tags in corrupted commits.")
    parser.add_argument('-T', '--no-truncate', action='store_true',
        help="Don't truncate large amounts of data.")
    sys.exit(main(parser.parse_args()))
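
# Example invocations (disk image path and block addresses are hypothetical):
#   ./scripts/readmdir.py disk.img 512 0x0 0x1       # current tags
#   ./scripts/readmdir.py disk.img 512 0x0 0x1 -l    # full commit log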
183
fr3092_mcu/components/modules/littlefs/scripts/readtree.py
Normal file
@ -0,0 +1,183 @@
#!/usr/bin/env python3

import struct
import sys
import json
import io
import itertools as it
from readmdir import Tag, MetadataPair

def main(args):
    superblock = None
    gstate = b'\0\0\0\0\0\0\0\0\0\0\0\0'
    dirs = []
    mdirs = []
    corrupted = []
    cycle = False
    with open(args.disk, 'rb') as f:
        tail = (args.block1, args.block2)
        hard = False
        while True:
            for m in it.chain((m for d in dirs for m in d), mdirs):
                if set(m.blocks) == set(tail):
                    # cycle detected
                    cycle = m.blocks
            if cycle:
                break

            # load mdir
            data = []
            blocks = {}
            for block in tail:
                f.seek(block * args.block_size)
                data.append(f.read(args.block_size)
                    .ljust(args.block_size, b'\xff'))
                blocks[id(data[-1])] = block

            mdir = MetadataPair(data)
            mdir.blocks = tuple(blocks[id(p.data)] for p in mdir.pair)

            # fetch some key metadata as we scan
            try:
                mdir.tail = mdir[Tag('tail', 0, 0)]
                if mdir.tail.size != 8 or mdir.tail.data == 8*b'\xff':
                    mdir.tail = None
            except KeyError:
                mdir.tail = None

            # have superblock?
            try:
                nsuperblock = mdir[
                    Tag(0x7ff, 0x3ff, 0), Tag('superblock', 0, 0)]
                superblock = nsuperblock, mdir[Tag('inlinestruct', 0, 0)]
            except KeyError:
                pass

            # have gstate?
            try:
                ngstate = mdir[Tag('movestate', 0, 0)]
                gstate = bytes((a or 0) ^ (b or 0)
                    for a,b in it.zip_longest(gstate, ngstate.data))
            except KeyError:
                pass

            # corrupted?
            if not mdir:
                corrupted.append(mdir)

            # add to directories
            mdirs.append(mdir)
            if mdir.tail is None or not mdir.tail.is_('hardtail'):
                dirs.append(mdirs)
                mdirs = []

            if mdir.tail is None:
                break

            tail = struct.unpack('<II', mdir.tail.data)
            hard = mdir.tail.is_('hardtail')
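
    # Reading aid (not new behavior): a "hardtail" chains additional metadata
    # pairs that belong to the same directory, while a plain tail points at
    # the next directory in littlefs's threaded linked-list, which is why a
    # non-hardtail closes out the current mdirs group above.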

    # find paths
    dirtable = {}
    for dir in dirs:
        dirtable[frozenset(dir[0].blocks)] = dir

    pending = [("/", dirs[0])]
    while pending:
        path, dir = pending.pop(0)
        for mdir in dir:
            for tag in mdir.tags:
                if tag.is_('dir'):
                    try:
                        npath = tag.data.decode('utf8')
                        dirstruct = mdir[Tag('dirstruct', tag.id, 0)]
                        nblocks = struct.unpack('<II', dirstruct.data)
                        nmdir = dirtable[frozenset(nblocks)]
                        pending.append(((path + '/' + npath), nmdir))
                    except KeyError:
                        pass

        dir[0].path = path.replace('//', '/')

    # print littlefs + version info
    version = ('?', '?')
    if superblock:
        version = tuple(reversed(
            struct.unpack('<HH', superblock[1].data[0:4].ljust(4, b'\xff'))))
    print("%-47s%s" % ("littlefs v%s.%s" % version,
        "data (truncated, if it fits)"
        if not any([args.no_truncate, args.log, args.all]) else ""))

    # print gstate
    print("gstate 0x%s" % ''.join('%02x' % c for c in gstate))
    tag = Tag(struct.unpack('<I', gstate[0:4].ljust(4, b'\xff'))[0])
    blocks = struct.unpack('<II', gstate[4:4+8].ljust(8, b'\xff'))
    if tag.size or not tag.isvalid:
        print(" orphans >=%d" % max(tag.size, 1))
    if tag.type:
        print(" move dir {%#x, %#x} id %d" % (
            blocks[0], blocks[1], tag.id))

    # print mdir info
    for i, dir in enumerate(dirs):
        print("dir %s" % (json.dumps(dir[0].path)
            if hasattr(dir[0], 'path') else '(orphan)'))

        for j, mdir in enumerate(dir):
            print("mdir {%#x, %#x} rev %d (was %d)%s%s" % (
                mdir.blocks[0], mdir.blocks[1], mdir.rev, mdir.pair[1].rev,
                ' (corrupted!)' if not mdir else '',
                ' -> {%#x, %#x}' % struct.unpack('<II', mdir.tail.data)
                    if mdir.tail else ''))

            f = io.StringIO()
            if args.log:
                mdir.dump_log(f, truncate=not args.no_truncate)
            elif args.all:
                mdir.dump_all(f, truncate=not args.no_truncate)
            else:
                mdir.dump_tags(f, truncate=not args.no_truncate)

            lines = list(filter(None, f.getvalue().split('\n')))
            for k, line in enumerate(lines):
                print("%s %s" % (
                    ' ' if j == len(dir)-1 else
                    'v' if k == len(lines)-1 else
                    '|',
                    line))

    errcode = 0
    for mdir in corrupted:
        errcode = errcode or 1
        print("*** corrupted mdir {%#x, %#x}! ***" % (
            mdir.blocks[0], mdir.blocks[1]))

    if cycle:
        errcode = errcode or 2
        print("*** cycle detected {%#x, %#x}! ***" % (
            cycle[0], cycle[1]))

    return errcode

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Dump semantic info about the metadata tree in littlefs")
    parser.add_argument('disk',
        help="File representing the block device.")
    parser.add_argument('block_size', type=lambda x: int(x, 0),
        help="Size of a block in bytes.")
    parser.add_argument('block1', nargs='?', default=0,
        type=lambda x: int(x, 0),
        help="Optional first block address for finding the superblock.")
    parser.add_argument('block2', nargs='?', default=1,
        type=lambda x: int(x, 0),
        help="Optional second block address for finding the superblock.")
    parser.add_argument('-l', '--log', action='store_true',
        help="Show tags in log.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all tags in log, including tags in corrupted commits.")
    parser.add_argument('-T', '--no-truncate', action='store_true',
        help="Show the full contents of files/attrs/tags.")
    sys.exit(main(parser.parse_args()))
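
# Example invocation (image path is hypothetical); block1/block2 default to
# the superblock at blocks 0 and 1:
#   ./scripts/readtree.py disk.img 512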
430
fr3092_mcu/components/modules/littlefs/scripts/stack.py
Normal file
@ -0,0 +1,430 @@
#!/usr/bin/env python3
#
# Script to find stack usage at the function level. Will detect recursion and
# report as infinite stack usage.
#

import os
import glob
import itertools as it
import re
import csv
import collections as co
import math as m


CI_PATHS = ['*.ci']

def collect(paths, **args):
    # parse the vcg format
    k_pattern = re.compile('([a-z]+)\s*:', re.DOTALL)
    v_pattern = re.compile('(?:"(.*?)"|([a-z]+))', re.DOTALL)
    def parse_vcg(rest):
        def parse_vcg(rest):
            node = []
            while True:
                rest = rest.lstrip()
                m = k_pattern.match(rest)
                if not m:
                    return (node, rest)
                k, rest = m.group(1), rest[m.end(0):]

                rest = rest.lstrip()
                if rest.startswith('{'):
                    v, rest = parse_vcg(rest[1:])
                    assert rest[0] == '}', "unexpected %r" % rest[0:1]
                    rest = rest[1:]
                    node.append((k, v))
                else:
                    m = v_pattern.match(rest)
                    assert m, "unexpected %r" % rest[0:1]
                    v, rest = m.group(1) or m.group(2), rest[m.end(0):]
                    node.append((k, v))

        node, rest = parse_vcg(rest)
        assert rest == '', "unexpected %r" % rest[0:1]
        return node

    # collect into functions
    results = co.defaultdict(lambda: (None, None, 0, set()))
    f_pattern = re.compile(
        r'([^\\]*)\\n([^:]*)[^\\]*\\n([0-9]+) bytes \((.*)\)')
    for path in paths:
        with open(path) as f:
            vcg = parse_vcg(f.read())
        for k, graph in vcg:
            if k != 'graph':
                continue
            for k, info in graph:
                if k == 'node':
                    info = dict(info)
                    m = f_pattern.match(info['label'])
                    if m:
                        function, file, size, type = m.groups()
                        if not args.get('quiet') and type != 'static':
                            print('warning: found non-static stack for %s (%s)'
                                % (function, type))
                        _, _, _, targets = results[info['title']]
                        results[info['title']] = (
                            file, function, int(size), targets)
                elif k == 'edge':
                    info = dict(info)
                    _, _, _, targets = results[info['sourcename']]
                    targets.add(info['targetname'])
                else:
                    continue

    if not args.get('everything'):
        for source, (s_file, s_function, _, _) in list(results.items()):
            # discard internal functions
            if s_file.startswith('<') or s_file.startswith('/usr/include'):
                del results[source]

    # find maximum stack size recursively, this requires also detecting cycles
    # (in case of recursion)
    def find_limit(source, seen=None):
        seen = seen or set()
        if source not in results:
            return 0
        _, _, frame, targets = results[source]

        limit = 0
        for target in targets:
            if target in seen:
                # found a cycle
                return float('inf')
            limit_ = find_limit(target, seen | {target})
            limit = max(limit, limit_)

        return frame + limit
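
    # For intuition (hypothetical frame sizes): with A(16) -> B(32) -> C(8),
    # find_limit('A') == 16+32+8 == 56; if C called back into A, the seen-set
    # above would catch the cycle and the limit becomes float('inf').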

    def find_deps(targets):
        deps = set()
        for target in targets:
            if target in results:
                t_file, t_function, _, _ = results[target]
                deps.add((t_file, t_function))
        return deps

    # flatten into a list
    flat_results = []
    for source, (s_file, s_function, frame, targets) in results.items():
        limit = find_limit(source)
        deps = find_deps(targets)
        flat_results.append((s_file, s_function, frame, limit, deps))

    return flat_results

def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .ci files
        paths = []
        for path in args['ci_paths']:
            if os.path.isdir(path):
                path = path + '/*.ci'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .ci files found in %r?' % args['ci_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['stack_frame']),
                    float(result['stack_limit']), # note limit can be inf
                    set())
                for result in r
                if result.get('stack_frame') not in {None, ''}
                if result.get('stack_limit') not in {None, ''}]

    total_frame = 0
    total_limit = 0
    for _, _, frame, limit, _ in results:
        total_frame += frame
        total_limit = max(total_limit, limit)

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['stack_frame']),
                        float(result['stack_limit']),
                        set())
                    for result in r
                    if result.get('stack_frame') not in {None, ''}
                    if result.get('stack_limit') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total_frame = 0
        prev_total_limit = 0
        for _, _, frame, limit, _ in prev_results:
            prev_total_frame += frame
            prev_total_limit = max(prev_total_limit, limit)

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        func = result.pop('name', '')
                        result.pop('stack_frame', None)
                        result.pop('stack_limit', None)
                        merged_results[(file, func)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, func, frame, limit, _ in results:
            merged_results[(file, func)]['stack_frame'] = frame
            merged_results[(file, func)]['stack_limit'] = limit

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'stack_frame', 'stack_limit'])
            w.writeheader()
            for (file, func), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': func, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: (0, 0, set()))
        for file, func, frame, limit, deps in results:
            entry = (file if by == 'file' else func)
            entry_frame, entry_limit, entry_deps = entries[entry]
            entries[entry] = (
                entry_frame + frame,
                max(entry_limit, limit),
                entry_deps | {file if by == 'file' else func
                    for file, func in deps})
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (None, None, None, None, 0, 0, 0, set()))
        for name, (new_frame, new_limit, deps) in news.items():
            diff[name] = (
                None, None,
                new_frame, new_limit,
                new_frame, new_limit,
                1.0,
                deps)
        for name, (old_frame, old_limit, _) in olds.items():
            _, _, new_frame, new_limit, _, _, _, deps = diff[name]
            diff[name] = (
                old_frame, old_limit,
                new_frame, new_limit,
                (new_frame or 0) - (old_frame or 0),
                0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
                    else (new_limit or 0) - (old_limit or 0),
                0.0 if m.isinf(new_limit or 0) and m.isinf(old_limit or 0)
                    else +float('inf') if m.isinf(new_limit or 0)
                    else -float('inf') if m.isinf(old_limit or 0)
                    else +0.0 if not old_limit and not new_limit
                    else +1.0 if not old_limit
                    else ((new_limit or 0) - (old_limit or 0))/(old_limit or 0),
                deps)
        return diff

    def sorted_entries(entries):
        if args.get('limit_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_limit_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        elif args.get('frame_sort'):
            return sorted(entries, key=lambda x: (-x[1][0], x))
        elif args.get('reverse_frame_sort'):
            return sorted(entries, key=lambda x: (+x[1][0], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('limit_sort'):
            return sorted(entries, key=lambda x: (-(x[1][3] or 0), x))
        elif args.get('reverse_limit_sort'):
            return sorted(entries, key=lambda x: (+(x[1][3] or 0), x))
        elif args.get('frame_sort'):
            return sorted(entries, key=lambda x: (-(x[1][2] or 0), x))
        elif args.get('reverse_frame_sort'):
            return sorted(entries, key=lambda x: (+(x[1][2] or 0), x))
        else:
            return sorted(entries, key=lambda x: (-x[1][6], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s %7s' % (by, 'frame', 'limit'))
        else:
            print('%-36s %15s %15s %15s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, frame, limit):
        print("%-36s %7d %7s" % (name,
            frame, '∞' if m.isinf(limit) else int(limit)))

    def print_diff_entry(name,
            old_frame, old_limit,
            new_frame, new_limit,
            diff_frame, diff_limit,
            ratio):
        print('%-36s %7s %7s %7s %7s %+7d %7s%s' % (name,
            old_frame if old_frame is not None else "-",
            ('∞' if m.isinf(old_limit) else int(old_limit))
                if old_limit is not None else "-",
            new_frame if new_frame is not None else "-",
            ('∞' if m.isinf(new_limit) else int(new_limit))
                if new_limit is not None else "-",
            diff_frame,
            ('+∞' if diff_limit > 0 and m.isinf(diff_limit)
                else '-∞' if diff_limit < 0 and m.isinf(diff_limit)
                else '%+d' % diff_limit),
            '' if not ratio
                else ' (+∞%)' if ratio > 0 and m.isinf(ratio)
                else ' (-∞%)' if ratio < 0 and m.isinf(ratio)
                else ' (%+.1f%%)' % (100*ratio)))

    def print_entries(by='name'):
        # build optional tree of dependencies
        def print_deps(entries, depth, print,
                filter=lambda _: True,
                prefixes=('', '', '', '')):
            entries = entries if isinstance(entries, list) else list(entries)
            filtered_entries = [(name, entry)
                for name, entry in entries
                if filter(name)]
            for i, (name, entry) in enumerate(filtered_entries):
                last = (i == len(filtered_entries)-1)
                print(prefixes[0+last] + name, entry)

                if depth > 0:
                    deps = entry[-1]
                    print_deps(entries, depth-1, print,
                        lambda name: name in deps,
                        (   prefixes[2+last] + "|-> ",
                            prefixes[2+last] + "'-> ",
                            prefixes[2+last] + "|   ",
                            prefixes[2+last] + "    "))

        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            print_deps(
                sorted_entries(entries.items()),
                args.get('depth') or 0,
                lambda name, entry: print_entry(name, *entry[:-1]))
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)

            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for _, old, _, _, _, _, _, _ in diff.values() if old is None),
                sum(1 for _, _, _, new, _, _, _, _ in diff.values() if new is None)))
            print_deps(
                filter(
                    lambda x: x[1][6] or args.get('all'),
                    sorted_diff_entries(diff.items())),
                args.get('depth') or 0,
                lambda name, entry: print_diff_entry(name, *entry[:-1]))

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total_frame, total_limit)
        else:
            diff_frame = total_frame - prev_total_frame
            diff_limit = (
                0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                else (total_limit or 0) - (prev_total_limit or 0))
            ratio = (
                0.0 if m.isinf(total_limit or 0) and m.isinf(prev_total_limit or 0)
                else +float('inf') if m.isinf(total_limit or 0)
                else -float('inf') if m.isinf(prev_total_limit or 0)
                else 0.0 if not prev_total_limit and not total_limit
                else 1.0 if not prev_total_limit
                else ((total_limit or 0) - (prev_total_limit or 0))/(prev_total_limit or 0))
            print_diff_entry('TOTAL',
                prev_total_frame, prev_total_limit,
                total_frame, total_limit,
                diff_frame, diff_limit,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()


if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find stack usage at the function level.")
    parser.add_argument('ci_paths', nargs='*', default=CI_PATHS,
        help="Description of where to find *.ci files. May be a directory \
            or a list of paths. Defaults to %r." % CI_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't parse callgraph files, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all functions, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--limit-sort', action='store_true',
        help="Sort by stack limit.")
    parser.add_argument('-S', '--reverse-limit-sort', action='store_true',
        help="Sort by stack limit, but backwards.")
    parser.add_argument('--frame-sort', action='store_true',
        help="Sort by stack frame size.")
    parser.add_argument('--reverse-frame-sort', action='store_true',
        help="Sort by stack frame size, but backwards.")
    parser.add_argument('-L', '--depth', default=0, type=lambda x: int(x, 0),
        nargs='?', const=float('inf'),
        help="Depth of dependencies to show.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level calls.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total stack size.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
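
# Example invocations (paths are hypothetical; the *.ci callgraph files are
# assumed to come from GCC's -fcallgraph-info=su option):
#   ./scripts/stack.py build/*.ci -s     # sorted by worst-case stack limit
#   ./scripts/stack.py build/*.ci -L2    # show two levels of callees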
331
fr3092_mcu/components/modules/littlefs/scripts/structs.py
Normal file
@ -0,0 +1,331 @@
#!/usr/bin/env python3
#
# Script to find struct sizes.
#

import os
import glob
import itertools as it
import subprocess as sp
import shlex
import re
import csv
import collections as co


OBJ_PATHS = ['*.o']

def collect(paths, **args):
    decl_pattern = re.compile(
        '^\s+(?P<no>[0-9]+)'
        '\s+(?P<dir>[0-9]+)'
        '\s+.*'
        '\s+(?P<file>[^\s]+)$')
    struct_pattern = re.compile(
        '^(?:.*DW_TAG_(?P<tag>[a-z_]+).*'
        '|^.*DW_AT_name.*:\s*(?P<name>[^:\s]+)\s*'
        '|^.*DW_AT_decl_file.*:\s*(?P<decl>[0-9]+)\s*'
        '|^.*DW_AT_byte_size.*:\s*(?P<size>[0-9]+)\s*)$')

    results = co.defaultdict(lambda: 0)
    for path in paths:
        # find decl, we want to filter by structs in .h files
        decls = {}
        # note objdump-tool may contain extra args
        cmd = args['objdump_tool'] + ['--dwarf=rawline', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            # find file numbers
            m = decl_pattern.match(line)
            if m:
                decls[int(m.group('no'))] = m.group('file')
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)

        # collect structs as we parse dwarf info
        found = False
        name = None
        decl = None
        size = None

        # note objdump-tool may contain extra args
        cmd = args['objdump_tool'] + ['--dwarf=info', path]
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE,
            stderr=sp.PIPE if not args.get('verbose') else None,
            universal_newlines=True,
            errors='replace')
        for line in proc.stdout:
            # state machine here to find structs
            m = struct_pattern.match(line)
            if m:
                if m.group('tag'):
                    if (name is not None
                            and decl is not None
                            and size is not None):
                        decl = decls.get(decl, '?')
                        results[(decl, name)] = size
                    found = (m.group('tag') == 'structure_type')
                    name = None
                    decl = None
                    size = None
                elif found and m.group('name'):
                    name = m.group('name')
                elif found and name and m.group('decl'):
                    decl = int(m.group('decl'))
                elif found and name and m.group('size'):
                    size = int(m.group('size'))
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in proc.stderr:
                    sys.stdout.write(line)
            sys.exit(-1)
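
        # The state machine above expects `objdump --dwarf=info` output
        # roughly of this shape (abbreviated, values hypothetical):
        #   <1><2a>: Abbrev Number: 5 (DW_TAG_structure_type)
        #      <2b>   DW_AT_name      : lfs_config
        #      <2f>   DW_AT_byte_size : 84
        #      <33>   DW_AT_decl_file : 3
        # where decl_file indexes the table collected in the rawline pass.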

    flat_results = []
    for (file, struct), size in results.items():
        # map to source files
        if args.get('build_dir'):
            file = re.sub('%s/*' % re.escape(args['build_dir']), '', file)
        # only include structs declared in header files in the current
        # directory, ignore internal-only structs (these are represented
        # in other measurements)
        if not args.get('everything'):
            if not file.endswith('.h'):
                continue
        # replace .o with .c, different scripts report .o/.c, we need to
        # choose one if we want to deduplicate csv files
        file = re.sub('\.o$', '.c', file)

        flat_results.append((file, struct, size))

    return flat_results


def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find sizes
    if not args.get('use', None):
        # find .o files
        paths = []
        for path in args['obj_paths']:
            if os.path.isdir(path):
                path = path + '/*.o'

            for path in glob.glob(path):
                paths.append(path)

        if not paths:
            print('no .obj files found in %r?' % args['obj_paths'])
            sys.exit(-1)

        results = collect(paths, **args)
    else:
        with openio(args['use']) as f:
            r = csv.DictReader(f)
            results = [
                (   result['file'],
                    result['name'],
                    int(result['struct_size']))
                for result in r
                if result.get('struct_size') not in {None, ''}]

    total = 0
    for _, _, size in results:
        total += size

    # find previous results?
    if args.get('diff'):
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                prev_results = [
                    (   result['file'],
                        result['name'],
                        int(result['struct_size']))
                    for result in r
                    if result.get('struct_size') not in {None, ''}]
        except FileNotFoundError:
            prev_results = []

        prev_total = 0
        for _, _, size in prev_results:
            prev_total += size

    # write results to CSV
    if args.get('output'):
        merged_results = co.defaultdict(lambda: {})
        other_fields = []

        # merge?
        if args.get('merge'):
            try:
                with openio(args['merge']) as f:
                    r = csv.DictReader(f)
                    for result in r:
                        file = result.pop('file', '')
                        struct = result.pop('name', '')
                        result.pop('struct_size', None)
                        merged_results[(file, struct)] = result
                        other_fields = result.keys()
            except FileNotFoundError:
                pass

        for file, struct, size in results:
            merged_results[(file, struct)]['struct_size'] = size

        with openio(args['output'], 'w') as f:
            w = csv.DictWriter(f, ['file', 'name', *other_fields, 'struct_size'])
            w.writeheader()
            for (file, struct), result in sorted(merged_results.items()):
                w.writerow({'file': file, 'name': struct, **result})

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: 0)
        for file, struct, size in results:
            entry = (file if by == 'file' else struct)
            entries[entry] += size
        return entries

    def diff_entries(olds, news):
        diff = co.defaultdict(lambda: (0, 0, 0, 0))
        for name, new in news.items():
            diff[name] = (0, new, new, 1.0)
        for name, old in olds.items():
            _, new, _, _ = diff[name]
            diff[name] = (old, new, new-old, (new-old)/old if old else 1.0)
        return diff

    def sorted_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1], x))
        else:
            return sorted(entries)

    def sorted_diff_entries(entries):
        if args.get('size_sort'):
            return sorted(entries, key=lambda x: (-x[1][1], x))
        elif args.get('reverse_size_sort'):
            return sorted(entries, key=lambda x: (+x[1][1], x))
        else:
            return sorted(entries, key=lambda x: (-x[1][3], x))

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s %7s' % (by, 'size'))
        else:
            print('%-36s %7s %7s %7s' % (by, 'old', 'new', 'diff'))

    def print_entry(name, size):
        print("%-36s %7d" % (name, size))

    def print_diff_entry(name, old, new, diff, ratio):
        print("%-36s %7s %7s %+7d%s" % (name,
            old or "-",
            new or "-",
            diff,
            ' (%+.1f%%)' % (100*ratio) if ratio else ''))

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, size in sorted_entries(entries.items()):
                print_entry(name, size)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            diff = diff_entries(prev_entries, entries)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for old, _, _, _ in diff.values() if not old),
                sum(1 for _, new, _, _ in diff.values() if not new)))
            for name, (old, new, diff, ratio) in sorted_diff_entries(
                    diff.items()):
                if ratio or args.get('all'):
                    print_diff_entry(name, old, new, diff, ratio)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            ratio = (0.0 if not prev_total and not total
                else 1.0 if not prev_total
                else (total-prev_total)/prev_total)
            print_diff_entry('TOTAL',
                prev_total, total,
                total-prev_total,
                ratio)

    if args.get('quiet'):
        pass
    elif args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()

if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Find struct sizes.")
    parser.add_argument('obj_paths', nargs='*', default=OBJ_PATHS,
        help="Description of where to find *.o files. May be a directory \
            or a list of paths. Defaults to %r." % OBJ_PATHS)
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output commands that run behind the scenes.")
    parser.add_argument('-q', '--quiet', action='store_true',
        help="Don't show anything, useful with -o.")
    parser.add_argument('-o', '--output',
        help="Specify CSV file to store results.")
    parser.add_argument('-u', '--use',
        help="Don't compile and find struct sizes, instead use this CSV file.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff struct size against.")
    parser.add_argument('-m', '--merge',
        help="Merge with an existing CSV file when writing to output.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all structs, not just the ones that changed.")
    parser.add_argument('-A', '--everything', action='store_true',
        help="Include builtin and libc specific symbols.")
    parser.add_argument('-s', '--size-sort', action='store_true',
        help="Sort by size.")
    parser.add_argument('-S', '--reverse-size-sort', action='store_true',
        help="Sort by size, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level struct sizes.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the total struct size.")
    parser.add_argument('--objdump-tool', default=['objdump'], type=lambda x: x.split(),
        help="Path to the objdump tool to use.")
    parser.add_argument('--build-dir',
        help="Specify the relative build directory. Used to map object files \
            to the correct source files.")
    sys.exit(main(**vars(parser.parse_args())))
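
# Example invocation (toolchain prefix is hypothetical):
#   ./scripts/structs.py build/*.o --objdump-tool="arm-none-eabi-objdump"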
279
fr3092_mcu/components/modules/littlefs/scripts/summary.py
Normal file
@ -0,0 +1,279 @@
#!/usr/bin/env python3
#
# Script to summarize the outputs of other scripts. Operates on CSV files.
#

import functools as ft
import collections as co
import os
import csv
import re
import math as m

# displayable fields
Field = co.namedtuple('Field', 'name,parse,acc,key,fmt,repr,null,ratio')
FIELDS = [
    # name, parse, accumulate, key, fmt, repr, null, ratio
    Field('code',
        lambda r: int(r['code_size']),
        sum,
        lambda r: r,
        '%7s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('data',
        lambda r: int(r['data_size']),
        sum,
        lambda r: r,
        '%7s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('stack',
        lambda r: float(r['stack_limit']),
        max,
        lambda r: r,
        '%7s',
        lambda r: '∞' if m.isinf(r) else int(r),
        '-',
        lambda old, new: (new-old)/old),
    Field('structs',
        lambda r: int(r['struct_size']),
        sum,
        lambda r: r,
        '%8s',
        lambda r: r,
        '-',
        lambda old, new: (new-old)/old),
    Field('coverage',
        lambda r: (int(r['coverage_hits']), int(r['coverage_count'])),
        lambda rs: ft.reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), rs),
        lambda r: r[0]/r[1],
        '%19s',
        lambda r: '%11s %7s' % ('%d/%d' % (r[0], r[1]), '%.1f%%' % (100*r[0]/r[1])),
        '%11s %7s' % ('-', '-'),
        lambda old, new: ((new[0]/new[1]) - (old[0]/old[1])))
]
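
# Each Field maps a CSV column produced by one of the other scripts onto a
# display: for example the 'code' field parses the code_size column written
# by code.py and accumulates it with sum, while 'stack' takes the max of
# stack_limit, since stack limits don't add across independent functions.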

def main(**args):
    def openio(path, mode='r'):
        if path == '-':
            if 'r' in mode:
                return os.fdopen(os.dup(sys.stdin.fileno()), 'r')
            else:
                return os.fdopen(os.dup(sys.stdout.fileno()), 'w')
        else:
            return open(path, mode)

    # find results
    results = co.defaultdict(lambda: {})
    for path in args.get('csv_paths', '-'):
        try:
            with openio(path) as f:
                r = csv.DictReader(f)
                for result in r:
                    file = result.pop('file', '')
                    name = result.pop('name', '')
                    prev = results[(file, name)]
                    for field in FIELDS:
                        try:
                            r = field.parse(result)
                            if field.name in prev:
                                results[(file, name)][field.name] = field.acc(
                                    [prev[field.name], r])
                            else:
                                results[(file, name)][field.name] = r
                        except (KeyError, ValueError):
                            pass
        except FileNotFoundError:
            pass

    # find fields
    if args.get('all_fields'):
        fields = FIELDS
    elif args.get('fields') is not None:
        fields_dict = {field.name: field for field in FIELDS}
        fields = [fields_dict[f] for f in args['fields']]
    else:
        fields = []
        for field in FIELDS:
            if any(field.name in result for result in results.values()):
                fields.append(field)

    # find total for every field
    total = {}
    for result in results.values():
        for field in fields:
            if field.name in result and field.name in total:
                total[field.name] = field.acc(
                    [total[field.name], result[field.name]])
            elif field.name in result:
                total[field.name] = result[field.name]

    # find previous results?
    if args.get('diff'):
        prev_results = co.defaultdict(lambda: {})
        try:
            with openio(args['diff']) as f:
                r = csv.DictReader(f)
                for result in r:
                    file = result.pop('file', '')
                    name = result.pop('name', '')
                    prev = prev_results[(file, name)]
                    for field in FIELDS:
                        try:
                            r = field.parse(result)
                            if field.name in prev:
                                prev_results[(file, name)][field.name] = field.acc(
                                    [prev[field.name], r])
                            else:
                                prev_results[(file, name)][field.name] = r
                        except (KeyError, ValueError):
                            pass
        except FileNotFoundError:
            pass

        prev_total = {}
        for result in prev_results.values():
            for field in fields:
                if field.name in result and field.name in prev_total:
                    prev_total[field.name] = field.acc(
                        [prev_total[field.name], result[field.name]])
                elif field.name in result:
                    prev_total[field.name] = result[field.name]

    # print results
    def dedup_entries(results, by='name'):
        entries = co.defaultdict(lambda: {})
        for (file, func), result in results.items():
            entry = (file if by == 'file' else func)
            prev = entries[entry]
            for field in fields:
                if field.name in result and field.name in prev:
                    entries[entry][field.name] = field.acc(
                        [prev[field.name], result[field.name]])
                elif field.name in result:
                    entries[entry][field.name] = result[field.name]
        return entries

    def sorted_entries(entries):
        if args.get('sort') is not None:
            field = {field.name: field for field in FIELDS}[args['sort']]
            return sorted(entries, key=lambda x: (
                -(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
        elif args.get('reverse_sort') is not None:
            field = {field.name: field for field in FIELDS}[args['reverse_sort']]
            return sorted(entries, key=lambda x: (
                +(field.key(x[1][field.name])) if field.name in x[1] else -1, x))
        else:
            return sorted(entries)

    def print_header(by=''):
        if not args.get('diff'):
            print('%-36s' % by, end='')
            for field in fields:
                print((' '+field.fmt) % field.name, end='')
            print()
        else:
            print('%-36s' % by, end='')
            for field in fields:
                print((' '+field.fmt) % field.name, end='')
                print(' %-9s' % '', end='')
            print()

    def print_entry(name, result):
        print('%-36s' % name, end='')
        for field in fields:
            r = result.get(field.name)
            if r is not None:
                print((' '+field.fmt) % field.repr(r), end='')
            else:
                print((' '+field.fmt) % '-', end='')
        print()

    def print_diff_entry(name, old, new):
        print('%-36s' % name, end='')
        for field in fields:
            n = new.get(field.name)
            if n is not None:
                print((' '+field.fmt) % field.repr(n), end='')
            else:
                print((' '+field.fmt) % '-', end='')
            o = old.get(field.name)
            ratio = (
                0.0 if m.isinf(o or 0) and m.isinf(n or 0)
                else +float('inf') if m.isinf(n or 0)
                else -float('inf') if m.isinf(o or 0)
                else 0.0 if not o and not n
                else +1.0 if not o
                else -1.0 if not n
                else field.ratio(o, n))
            print(' %-9s' % (
                '' if not ratio
                else '(+∞%)' if ratio > 0 and m.isinf(ratio)
                else '(-∞%)' if ratio < 0 and m.isinf(ratio)
                else '(%+.1f%%)' % (100*ratio)), end='')
        print()

    def print_entries(by='name'):
        entries = dedup_entries(results, by=by)

        if not args.get('diff'):
            print_header(by=by)
            for name, result in sorted_entries(entries.items()):
                print_entry(name, result)
        else:
            prev_entries = dedup_entries(prev_results, by=by)
            print_header(by='%s (%d added, %d removed)' % (by,
                sum(1 for name in entries if name not in prev_entries),
                sum(1 for name in prev_entries if name not in entries)))
            for name, result in sorted_entries(entries.items()):
                if args.get('all') or result != prev_entries.get(name, {}):
                    print_diff_entry(name, prev_entries.get(name, {}), result)

    def print_totals():
        if not args.get('diff'):
            print_entry('TOTAL', total)
        else:
            print_diff_entry('TOTAL', prev_total, total)

    if args.get('summary'):
        print_header()
        print_totals()
    elif args.get('files'):
        print_entries(by='file')
        print_totals()
    else:
        print_entries(by='name')
        print_totals()


if __name__ == "__main__":
    import argparse
    import sys
    parser = argparse.ArgumentParser(
        description="Summarize measurements")
    parser.add_argument('csv_paths', nargs='*', default='-',
        help="Description of where to find *.csv files. May be a directory \
            or list of paths. *.csv files will be merged to show the total \
            coverage.")
    parser.add_argument('-d', '--diff',
        help="Specify CSV file to diff against.")
    parser.add_argument('-a', '--all', action='store_true',
        help="Show all objects, not just the ones that changed.")
    parser.add_argument('-e', '--all-fields', action='store_true',
        help="Show all fields, even those with no results.")
    parser.add_argument('-f', '--fields', type=lambda x: re.split('\s*,\s*', x),
        help="Comma separated list of fields to print, by default all fields \
            that are found in the CSV files are printed.")
    parser.add_argument('-s', '--sort',
        help="Sort by this field.")
    parser.add_argument('-S', '--reverse-sort',
        help="Sort by this field, but backwards.")
    parser.add_argument('-F', '--files', action='store_true',
        help="Show file-level calls.")
    parser.add_argument('-Y', '--summary', action='store_true',
        help="Only show the totals.")
    sys.exit(main(**vars(parser.parse_args())))
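
# Example invocation (CSV names are hypothetical, produced by the other
# scripts' -o option):
#   ./scripts/summary.py lfs.code.csv lfs.stack.csv -f code,stack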
860
fr3092_mcu/components/modules/littlefs/scripts/test.py
Normal file
@ -0,0 +1,860 @@
#!/usr/bin/env python3
#
# This script manages littlefs tests, which are configured with
# .toml files stored in the tests directory.
#

import toml
import glob
import re
import os
import io
import itertools as it
import collections.abc as abc
import subprocess as sp
import base64
import sys
import copy
import shlex
import pty
import errno
import signal

TEST_PATHS = 'tests'
RULES = """
|
||||
# add block devices to sources
|
||||
TESTSRC ?= $(SRC) $(wildcard bd/*.c)
|
||||
|
||||
define FLATTEN
|
||||
%(path)s%%$(subst /,.,$(target)): $(target)
|
||||
./scripts/explode_asserts.py $$< -o $$@
|
||||
endef
|
||||
$(foreach target,$(TESTSRC),$(eval $(FLATTEN)))
|
||||
|
||||
-include %(path)s*.d
|
||||
.SECONDARY:
|
||||
|
||||
%(path)s.test: %(path)s.test.o \\
|
||||
$(foreach t,$(subst /,.,$(TESTSRC:.c=.o)),%(path)s.$t)
|
||||
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
|
||||
|
||||
# needed in case builddir is different
|
||||
%(path)s%%.o: %(path)s%%.c
|
||||
$(CC) -c -MMD $(CFLAGS) $< -o $@
|
||||
"""
|
||||
COVERAGE_RULES = """
|
||||
%(path)s.test: override CFLAGS += -fprofile-arcs -ftest-coverage
|
||||
|
||||
# delete lingering coverage
|
||||
%(path)s.test: | %(path)s.info.clean
|
||||
.PHONY: %(path)s.info.clean
|
||||
%(path)s.info.clean:
|
||||
rm -f %(path)s*.gcda
|
||||
|
||||
# accumulate coverage info
|
||||
.PHONY: %(path)s.info
|
||||
%(path)s.info:
|
||||
$(strip $(LCOV) -c \\
|
||||
$(addprefix -d ,$(wildcard %(path)s*.gcda)) \\
|
||||
--rc 'geninfo_adjust_src_path=$(shell pwd)' \\
|
||||
-o $@)
|
||||
$(LCOV) -e $@ $(addprefix /,$(SRC)) -o $@
|
||||
ifdef COVERAGETARGET
|
||||
$(strip $(LCOV) -a $@ \\
|
||||
$(addprefix -a ,$(wildcard $(COVERAGETARGET))) \\
|
||||
-o $(COVERAGETARGET))
|
||||
endif
|
||||
"""
|
||||
GLOBALS = """
|
||||
//////////////// AUTOGENERATED TEST ////////////////
|
||||
#include "lfs.h"
|
||||
#include "bd/lfs_testbd.h"
|
||||
#include <stdio.h>
|
||||
extern const char *lfs_testbd_path;
|
||||
extern uint32_t lfs_testbd_cycles;
|
||||
"""
|
||||
DEFINES = {
|
||||
'LFS_READ_SIZE': 16,
|
||||
'LFS_PROG_SIZE': 'LFS_READ_SIZE',
|
||||
'LFS_BLOCK_SIZE': 512,
|
||||
'LFS_BLOCK_COUNT': 1024,
|
||||
'LFS_BLOCK_CYCLES': -1,
|
||||
'LFS_CACHE_SIZE': '(64 % LFS_PROG_SIZE == 0 ? 64 : LFS_PROG_SIZE)',
|
||||
'LFS_LOOKAHEAD_SIZE': 16,
|
||||
'LFS_ERASE_VALUE': 0xff,
|
||||
'LFS_ERASE_CYCLES': 0,
|
||||
'LFS_BADBLOCK_BEHAVIOR': 'LFS_TESTBD_BADBLOCK_PROGERROR',
|
||||
}
|
||||
PROLOGUE = """
|
||||
// prologue
|
||||
__attribute__((unused)) lfs_t lfs;
|
||||
__attribute__((unused)) lfs_testbd_t bd;
|
||||
__attribute__((unused)) lfs_file_t file;
|
||||
__attribute__((unused)) lfs_dir_t dir;
|
||||
__attribute__((unused)) struct lfs_info info;
|
||||
__attribute__((unused)) char path[1024];
|
||||
__attribute__((unused)) uint8_t buffer[1024];
|
||||
__attribute__((unused)) lfs_size_t size;
|
||||
__attribute__((unused)) int err;
|
||||
|
||||
__attribute__((unused)) const struct lfs_config cfg = {
|
||||
.context = &bd,
|
||||
.read = lfs_testbd_read,
|
||||
.prog = lfs_testbd_prog,
|
||||
.erase = lfs_testbd_erase,
|
||||
.sync = lfs_testbd_sync,
|
||||
.read_size = LFS_READ_SIZE,
|
||||
.prog_size = LFS_PROG_SIZE,
|
||||
.block_size = LFS_BLOCK_SIZE,
|
||||
.block_count = LFS_BLOCK_COUNT,
|
||||
.block_cycles = LFS_BLOCK_CYCLES,
|
||||
.cache_size = LFS_CACHE_SIZE,
|
||||
.lookahead_size = LFS_LOOKAHEAD_SIZE,
|
||||
};
|
||||
|
||||
__attribute__((unused)) const struct lfs_testbd_config bdcfg = {
|
||||
.erase_value = LFS_ERASE_VALUE,
|
||||
.erase_cycles = LFS_ERASE_CYCLES,
|
||||
.badblock_behavior = LFS_BADBLOCK_BEHAVIOR,
|
||||
.power_cycles = lfs_testbd_cycles,
|
||||
};
|
||||
|
||||
lfs_testbd_createcfg(&cfg, lfs_testbd_path, &bdcfg) => 0;
|
||||
"""
|
||||
EPILOGUE = """
|
||||
// epilogue
|
||||
lfs_testbd_destroy(&cfg) => 0;
|
||||
"""
|
||||
PASS = '\033[32m✓\033[0m'
|
||||
FAIL = '\033[31m✗\033[0m'
|
||||
|
||||

class TestFailure(Exception):
    def __init__(self, case, returncode=None, stdout=None, assert_=None):
        self.case = case
        self.returncode = returncode
        self.stdout = stdout
        self.assert_ = assert_

class TestCase:
    def __init__(self, config, filter=filter,
            suite=None, caseno=None, lineno=None, **_):
        self.config = config
        self.filter = filter
        self.suite = suite
        self.caseno = caseno
        self.lineno = lineno

        self.code = config['code']
        self.code_lineno = config['code_lineno']
        self.defines = config.get('define', {})
        self.if_ = config.get('if', None)
        self.in_ = config.get('in', None)

        self.result = None

    def __str__(self):
        if hasattr(self, 'permno'):
            if any(k not in self.case.defines for k in self.defines):
                return '%s#%d#%d (%s)' % (
                    self.suite.name, self.caseno, self.permno, ', '.join(
                        '%s=%s' % (k, v) for k, v in self.defines.items()
                        if k not in self.case.defines))
            else:
                return '%s#%d#%d' % (
                    self.suite.name, self.caseno, self.permno)
        else:
            return '%s#%d' % (
                self.suite.name, self.caseno)

    def permute(self, class_=None, defines={}, permno=None, **_):
        ncase = (class_ or type(self))(self.config)
        for k, v in self.__dict__.items():
            setattr(ncase, k, v)
        ncase.case = self
        ncase.perms = [ncase]
        ncase.permno = permno
        ncase.defines = defines
        return ncase

    def build(self, f, **_):
        # prologue
        for k, v in sorted(self.defines.items()):
            if k not in self.suite.defines:
                f.write('#define %s %s\n' % (k, v))

        f.write('void test_case%d(%s) {' % (self.caseno, ','.join(
            '\n'+8*' '+'__attribute__((unused)) intmax_t %s' % k
            for k in sorted(self.perms[0].defines)
            if k not in self.defines)))

        f.write(PROLOGUE)
        f.write('\n')
        f.write(4*' '+'// test case %d\n' % self.caseno)
        f.write(4*' '+'#line %d "%s"\n' % (self.code_lineno, self.suite.path))

        # test case goes here
        f.write(self.code)

        # epilogue
        f.write(EPILOGUE)
        f.write('}\n')

        for k, v in sorted(self.defines.items()):
            if k not in self.suite.defines:
                f.write('#undef %s\n' % k)

    def shouldtest(self, **args):
        if (self.filter is not None and
                len(self.filter) >= 1 and
                self.filter[0] != self.caseno):
            return False
        elif (self.filter is not None and
                len(self.filter) >= 2 and
                self.filter[1] != self.permno):
            return False
        elif args.get('no_internal') and self.in_ is not None:
            return False
        elif self.if_ is not None:
            if_ = self.if_
            while True:
                for k, v in sorted(self.defines.items(),
                        key=lambda x: len(x[0]), reverse=True):
                    if k in if_:
                        if_ = if_.replace(k, '(%s)' % v)
                        break
                else:
                    break
            if_ = (
                re.sub('(\&\&|\?)', ' and ',
                re.sub('(\|\||:)', ' or ',
                re.sub('!(?!=)', ' not ', if_))))
            return eval(if_)
        else:
            return True
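
    # For example (hypothetical define values): a .toml condition such as
    #   if = 'LFS_BLOCK_CYCLES == -1 && !LFS_PROG_SIZE'
    # has its defines substituted and its C operators rewritten by the
    # re.sub chain above into something like
    #   '(-1) == -1  and   not (16)'
    # which Python's eval can then decide.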

    def test(self, exec=[], persist=False, cycles=None,
            gdb=False, failure=None, disk=None, **args):
        # build command
        cmd = exec + ['./%s.test' % self.suite.path,
            repr(self.caseno), repr(self.permno)]

        # persist disk or keep in RAM for speed?
        if persist:
            if not disk:
                disk = self.suite.path + '.disk'
            if persist != 'noerase':
                try:
                    with open(disk, 'w') as f:
                        f.truncate(0)
                    if args.get('verbose'):
                        print('truncate --size=0', disk)
                except FileNotFoundError:
                    pass

            cmd.append(disk)

        # simulate power-loss after n cycles?
        if cycles:
            cmd.append(str(cycles))

        # failed? drop into debugger?
        if gdb and failure:
            ncmd = ['gdb']
            if gdb == 'assert':
                ncmd.extend(['-ex', 'r'])
                if failure.assert_:
                    ncmd.extend(['-ex', 'up 2'])
            elif gdb == 'main':
                ncmd.extend([
                    '-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
                    '-ex', 'r'])
            ncmd.extend(['--args'] + cmd)

            if args.get('verbose'):
                print(' '.join(shlex.quote(c) for c in ncmd))
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            sys.exit(sp.call(ncmd))

        # run test case!
        mpty, spty = pty.openpty()
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd, stdout=spty, stderr=spty)
        os.close(spty)
        mpty = os.fdopen(mpty, 'r', 1)
        stdout = []
        assert_ = None
        try:
            while True:
                try:
                    line = mpty.readline()
                except OSError as e:
                    if e.errno == errno.EIO:
                        break
                    raise
                if not line:
                    break
                stdout.append(line)
                if args.get('verbose'):
                    sys.stdout.write(line)
                # intercept asserts
                m = re.match(
                    '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
                    .format('(?:\033\[[\d;]*.| )*', 'assert'),
                    line)
                if m and assert_ is None:
                    try:
                        with open(m.group(1)) as f:
                            lineno = int(m.group(2))
                            line = (next(it.islice(f, lineno-1, None))
                                .strip('\n'))
                        assert_ = {
                            'path': m.group(1),
                            'line': line,
                            'lineno': lineno,
                            'message': m.group(3)}
                    except:
                        pass
        except KeyboardInterrupt:
            raise TestFailure(self, 1, stdout, None)
        proc.wait()

        # did we pass?
        if proc.returncode != 0:
            raise TestFailure(self, proc.returncode, stdout, assert_)
        else:
            return PASS

class ValgrindTestCase(TestCase):
    def __init__(self, config, **args):
        self.leaky = config.get('leaky', False)
        super().__init__(config, **args)

    def shouldtest(self, **args):
        return not self.leaky and super().shouldtest(**args)

    def test(self, exec=[], **args):
        verbose = args.get('verbose')
        uninit = (self.defines.get('LFS_ERASE_VALUE', None) == -1)
        exec = [
            'valgrind',
            '--leak-check=full',
            ] + (['--undef-value-errors=no'] if uninit else []) + [
            ] + (['--track-origins=yes'] if not uninit else []) + [
            '--error-exitcode=4',
            '--error-limit=no',
            ] + (['--num-callers=1'] if not verbose else []) + [
            '-q'] + exec
        return super().test(exec=exec, **args)

class ReentrantTestCase(TestCase):
    def __init__(self, config, **args):
        self.reentrant = config.get('reentrant', False)
        super().__init__(config, **args)

    def shouldtest(self, **args):
        return self.reentrant and super().shouldtest(**args)

    def test(self, persist=False, gdb=False, failure=None, **args):
        for cycles in it.count(1):
            # clear disk first?
            if cycles == 1 and persist != 'noerase':
                persist = 'erase'
            else:
                persist = 'noerase'

            # exact cycle we should drop into debugger?
            if gdb and failure and failure.cycleno == cycles:
                return super().test(gdb=gdb, persist=persist, cycles=cycles,
                    failure=failure, **args)

            # run tests, but kill the program after prog/erase has
            # been hit n cycles. We exit with a special return code if the
            # program has not finished, since this isn't a test failure.
            try:
                return super().test(persist=persist, cycles=cycles, **args)
            except TestFailure as nfailure:
                if nfailure.returncode == 33:
                    continue
                else:
                    nfailure.cycleno = cycles
                    raise
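
    # Note: the magic returncode 33 above is how the harness distinguishes
    # "power-loss simulated, program intentionally killed" from a real test
    # failure; the loop then retries with one more prog/erase cycle allowed.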
class TestSuite:
    def __init__(self, path, classes=[TestCase], defines={},
            filter=None, **args):
        self.name = os.path.basename(path)
        if self.name.endswith('.toml'):
            self.name = self.name[:-len('.toml')]
        if args.get('build_dir'):
            self.toml = path
            self.path = args['build_dir'] + '/' + path
        else:
            self.toml = path
            self.path = path
        self.classes = classes
        self.defines = defines.copy()
        self.filter = filter

        with open(self.toml) as f:
            # load tests
            config = toml.load(f)

            # find line numbers
            f.seek(0)
            linenos = []
            code_linenos = []
            for i, line in enumerate(f):
                if re.match(r'\[\[\s*case\s*\]\]', line):
                    linenos.append(i+1)
                if re.match(r'code\s*=\s*(\'\'\'|""")', line):
                    code_linenos.append(i+2)

            code_linenos.reverse()

        # grab global config
        for k, v in config.get('define', {}).items():
            if k not in self.defines:
                self.defines[k] = v
        self.code = config.get('code', None)
        if self.code is not None:
            self.code_lineno = code_linenos.pop()

        # create initial test cases
        self.cases = []
        for i, (case, lineno) in enumerate(zip(config['case'], linenos)):
            # code lineno?
            if 'code' in case:
                case['code_lineno'] = code_linenos.pop()
            # merge conditions if necessary
            if 'if' in config and 'if' in case:
                case['if'] = '(%s) && (%s)' % (config['if'], case['if'])
            elif 'if' in config:
                case['if'] = config['if']
            # initialize test case
            self.cases.append(TestCase(case, filter=filter,
                suite=self, caseno=i+1, lineno=lineno, **args))

    def __str__(self):
        return self.name

    def __lt__(self, other):
        return self.name < other.name

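    # permute() expands every iterable define into its own permutation.
    # As a hypothetical example, defines = {'N': [1, 2], 'M': 3} expands
    # to {'N': 1, 'M': 3} and {'N': 2, 'M': 3}, and each result is then
    # multiplied by every requested TestCase class.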
    def permute(self, **args):
        for case in self.cases:
            # let's find all parameterized definitions, in one of [args.D,
            # suite.defines, case.defines, DEFINES]. Note that each of these
            # can be either a dict of defines, or a list of dicts, expressing
            # an initial set of permutations.
            pending = [{}]
            for inits in [self.defines, case.defines, DEFINES]:
                if not isinstance(inits, list):
                    inits = [inits]

                npending = []
                for init, pinit in it.product(inits, pending):
                    ninit = pinit.copy()
                    for k, v in init.items():
                        if k not in ninit:
                            try:
                                ninit[k] = eval(v)
                            except:
                                ninit[k] = v
                    npending.append(ninit)

                pending = npending

            # expand permutations
            pending = list(reversed(pending))
            expanded = []
            while pending:
                perm = pending.pop()
                for k, v in sorted(perm.items()):
                    if not isinstance(v, str) and isinstance(v, abc.Iterable):
                        for nv in reversed(v):
                            nperm = perm.copy()
                            nperm[k] = nv
                            pending.append(nperm)
                        break
                else:
                    expanded.append(perm)

            # generate permutations
            case.perms = []
            for i, (class_, defines) in enumerate(
                    it.product(self.classes, expanded)):
                case.perms.append(case.permute(
                    class_, defines, permno=i+1, **args))

            # also track non-unique defines
            case.defines = {}
            for k, v in case.perms[0].defines.items():
                if all(perm.defines[k] == v for perm in case.perms):
                    case.defines[k] = v

        # track all perms and non-unique defines
        self.perms = []
        for case in self.cases:
            self.perms.extend(case.perms)

        self.defines = {}
        for k, v in self.perms[0].defines.items():
            if all(perm.defines.get(k, None) == v for perm in self.perms):
                self.defines[k] = v

        return self.perms

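    # build() emits a *.test.tc file containing GLOBALS, the suite code,
    # every case body, and a generated C main() that dispatches on argv,
    # plus a *.mk makefile fragment; the .tc files are turned into .c by
    # scripts/explode_asserts.py via the rule written below.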
    def build(self, **args):
        # build test files
        tf = open(self.path + '.test.tc', 'w')
        tf.write(GLOBALS)
        if self.code is not None:
            tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
            tf.write(self.code)

        tfs = {None: tf}
        for case in self.cases:
            if case.in_ not in tfs:
                tfs[case.in_] = open(self.path+'.'+
                    re.sub(r'(\.c)?$', '.tc', case.in_.replace('/', '.')), 'w')
                tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
                with open(case.in_) as f:
                    for line in f:
                        tfs[case.in_].write(line)
                tfs[case.in_].write('\n')
                tfs[case.in_].write(GLOBALS)

            tfs[case.in_].write('\n')
            case.build(tfs[case.in_], **args)

        tf.write('\n')
        tf.write('const char *lfs_testbd_path;\n')
        tf.write('uint32_t lfs_testbd_cycles;\n')
        tf.write('int main(int argc, char **argv) {\n')
        tf.write(4*' '+'int case_ = (argc > 1) ? atoi(argv[1]) : 0;\n')
        tf.write(4*' '+'int perm = (argc > 2) ? atoi(argv[2]) : 0;\n')
        tf.write(4*' '+'lfs_testbd_path = (argc > 3) ? argv[3] : NULL;\n')
        tf.write(4*' '+'lfs_testbd_cycles = (argc > 4) ? atoi(argv[4]) : 0;\n')
        for perm in self.perms:
            # test declaration
            tf.write(4*' '+'extern void test_case%d(%s);\n' % (
                perm.caseno, ', '.join(
                    'intmax_t %s' % k for k in sorted(perm.defines)
                    if k not in perm.case.defines)))
            # test call
            tf.write(4*' '+
                'if (argc < 3 || (case_ == %d && perm == %d)) {'
                ' test_case%d(%s); '
                '}\n' % (perm.caseno, perm.permno, perm.caseno, ', '.join(
                    str(v) for k, v in sorted(perm.defines.items())
                    if k not in perm.case.defines)))
        tf.write('}\n')

        for tf in tfs.values():
            tf.close()

        # write makefiles
        with open(self.path + '.mk', 'w') as mk:
            mk.write(RULES.replace(4*' ', '\t') % dict(path=self.path))
            mk.write('\n')

            # add coverage hooks?
            if args.get('coverage'):
                mk.write(COVERAGE_RULES.replace(4*' ', '\t') % dict(
                    path=self.path))
                mk.write('\n')

            # add truly global defines globally
            for k, v in sorted(self.defines.items()):
                mk.write('%s.test: override CFLAGS += -D%s=%r\n'
                    % (self.path, k, v))

            for path in tfs:
                if path is None:
                    mk.write('%s: %s | %s\n' % (
                        self.path+'.test.c',
                        self.toml,
                        self.path+'.test.tc'))
                else:
                    mk.write('%s: %s %s | %s\n' % (
                        self.path+'.'+path.replace('/', '.'),
                        self.toml,
                        path,
                        self.path+'.'+re.sub(r'(\.c)?$', '.tc',
                            path.replace('/', '.'))))
                mk.write('\t./scripts/explode_asserts.py $| -o $@\n')

        self.makefile = self.path + '.mk'
        self.target = self.path + '.test'
        return self.makefile, self.target

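    # test() runs every permutation in the suite; when not verbose, one
    # PASS/FAIL character is printed per permutation as a progress bar.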
    def test(self, **args):
        # run test suite!
        if not args.get('verbose', True):
            sys.stdout.write(self.name + ' ')
            sys.stdout.flush()
        for perm in self.perms:
            if not perm.shouldtest(**args):
                continue

            try:
                result = perm.test(**args)
            except TestFailure as failure:
                perm.result = failure
                if not args.get('verbose', True):
                    sys.stdout.write(FAIL)
                    sys.stdout.flush()
                if not args.get('keep_going'):
                    if not args.get('verbose', True):
                        sys.stdout.write('\n')
                    raise
            else:
                perm.result = PASS
                if not args.get('verbose', True):
                    sys.stdout.write(PASS)
                    sys.stdout.flush()

        if not args.get('verbose', True):
            sys.stdout.write('\n')

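# main() drives the whole flow: parse -D defines, locate suites, expand
# permutations, build everything with make, run, and report results.
# A hypothetical invocation (flag spellings follow the argparse
# declarations at the bottom of this file):
#
#   ./scripts/test.py -nr test_dirs#1 -D LFS_ERASE_VALUE=-1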
def main(**args):
    # figure out explicit defines
    defines = {}
    for define in args['D']:
        k, v, *_ = define.split('=', 2) + ['']
        defines[k] = v

    # and what class of TestCase to run
    classes = []
    if args.get('normal'):
        classes.append(TestCase)
    if args.get('reentrant'):
        classes.append(ReentrantTestCase)
    if args.get('valgrind'):
        classes.append(ValgrindTestCase)
    if not classes:
        classes = [TestCase]

    suites = []
    for testpath in args['test_paths']:
        # optionally specified test case/perm
        testpath, *filter = testpath.split('#')
        filter = [int(f) for f in filter]

        # figure out the suite's toml file
        if os.path.isdir(testpath):
            testpath = testpath + '/*.toml'
        elif os.path.isfile(testpath):
            testpath = testpath
        elif testpath.endswith('.toml'):
            testpath = TEST_PATHS + '/' + testpath
        else:
            testpath = TEST_PATHS + '/' + testpath + '.toml'

        # find tests
        for path in glob.glob(testpath):
            suites.append(TestSuite(path, classes, defines, filter, **args))

    # sort for reproducibility
    suites = sorted(suites)

    # generate permutations
    for suite in suites:
        suite.permute(**args)

    # build tests in parallel
    print('====== building ======')
    makefiles = []
    targets = []
    for suite in suites:
        makefile, target = suite.build(**args)
        makefiles.append(makefile)
        targets.append(target)

    cmd = (['make', '-f', 'Makefile'] +
        list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
        [target for target in targets])
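    # run make through a pty so the compiler believes it's writing to a
    # terminal and keeps its colored diagnostics; the warning-intercept
    # regex below relies on matching those ANSI escape sequences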
    mpty, spty = pty.openpty()
    if args.get('verbose'):
        print(' '.join(shlex.quote(c) for c in cmd))
    proc = sp.Popen(cmd, stdout=spty, stderr=spty)
    os.close(spty)
    mpty = os.fdopen(mpty, 'r', 1)
    stdout = []
    while True:
        try:
            line = mpty.readline()
        except OSError as e:
            if e.errno == errno.EIO:
                break
            raise
        if not line:
            break
        stdout.append(line)
        if args.get('verbose'):
            sys.stdout.write(line)
        # intercept warnings
        m = re.match(
            '^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
            .format('(?:\033\[[\d;]*.| )*', 'warning'),
            line)
        if m and not args.get('verbose'):
            try:
                with open(m.group(1)) as f:
                    lineno = int(m.group(2))
                    line = next(it.islice(f, lineno-1, None)).strip('\n')
                    sys.stdout.write(
                        "\033[01m{path}:{lineno}:\033[01;35mwarning:\033[m "
                        "{message}\n{line}\n\n".format(
                            path=m.group(1), line=line, lineno=lineno,
                            message=m.group(3)))
            except:
                pass
    proc.wait()
    if proc.returncode != 0:
        if not args.get('verbose'):
            for line in stdout:
                sys.stdout.write(line)
        sys.exit(-1)

    print('built %d test suites, %d test cases, %d permutations' % (
        len(suites),
        sum(len(suite.cases) for suite in suites),
        sum(len(suite.perms) for suite in suites)))

    total = 0
    for suite in suites:
        for perm in suite.perms:
            total += perm.shouldtest(**args)
    if total != sum(len(suite.perms) for suite in suites):
        print('filtered down to %d permutations' % total)

    # only requested to build?
    if args.get('build'):
        return 0

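    # run the tests, stopping at the first failure unless --keep-going
    # was given; each perm records its result for the summary below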
    print('====== testing ======')
    try:
        for suite in suites:
            suite.test(**args)
    except TestFailure:
        pass

    print('====== results ======')
    passed = 0
    failed = 0
    for suite in suites:
        for perm in suite.perms:
            if perm.result == PASS:
                passed += 1
            elif isinstance(perm.result, TestFailure):
                sys.stdout.write(
                    "\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
                    "{perm} failed\n".format(
                        perm=perm, path=perm.suite.path, lineno=perm.lineno,
                        returncode=perm.result.returncode or 0))
                if perm.result.stdout:
                    if perm.result.assert_:
                        stdout = perm.result.stdout[:-1]
                    else:
                        stdout = perm.result.stdout
                    for line in stdout[-5:]:
                        sys.stdout.write(line)
                if perm.result.assert_:
                    sys.stdout.write(
                        "\033[01m{path}:{lineno}:\033[01;31massert:\033[m "
                        "{message}\n{line}\n".format(
                            **perm.result.assert_))
                sys.stdout.write('\n')
                failed += 1

    if args.get('coverage'):
        # collect coverage info
        # why -j1? lcov doesn't work in parallel because of gcov limitations
        cmd = (['make', '-j1', '-f', 'Makefile'] +
            list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
            (['COVERAGETARGET=%s' % args['coverage']]
                if isinstance(args['coverage'], str) else []) +
            [suite.path + '.info' for suite in suites
                if any(perm.result == PASS for perm in suite.perms)])
        if args.get('verbose'):
            print(' '.join(shlex.quote(c) for c in cmd))
        proc = sp.Popen(cmd,
            stdout=sp.PIPE if not args.get('verbose') else None,
            stderr=sp.STDOUT if not args.get('verbose') else None,
            universal_newlines=True)
        stdout = []
        for line in proc.stdout:
            stdout.append(line)
        proc.wait()
        if proc.returncode != 0:
            if not args.get('verbose'):
                for line in stdout:
                    sys.stdout.write(line)
            sys.exit(-1)

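    # optionally re-run the last recorded failure under gdb so it can be
    # inspected interactively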
    if args.get('gdb'):
        failure = None
        for suite in suites:
            for perm in suite.perms:
                if isinstance(perm.result, TestFailure):
                    failure = perm.result
        if failure is not None:
            print('====== gdb ======')
            # drop into gdb
            failure.case.test(failure=failure, **args)
        sys.exit(0)

    print('tests passed %d/%d (%.1f%%)' % (passed, total,
        100*(passed/total if total else 1.0)))
    print('tests failed %d/%d (%.1f%%)' % (failed, total,
        100*(failed/total if total else 1.0)))
    return 1 if failed > 0 else 0

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(
        description="Run parameterized tests in various configurations.")
    parser.add_argument('test_paths', nargs='*', default=[TEST_PATHS],
        help="Description of test(s) to run. By default, this is all tests \
            found in the \"{0}\" directory. Here, you can specify a different \
            directory of tests, a specific file, a suite by name, and even \
            specific test cases and permutations. For example \
            \"test_dirs#1\" or \"{0}/test_dirs.toml#1#1\".".format(TEST_PATHS))
    parser.add_argument('-D', action='append', default=[],
        help="Overriding parameter definitions.")
    parser.add_argument('-v', '--verbose', action='store_true',
        help="Output everything that is happening.")
    parser.add_argument('-k', '--keep-going', action='store_true',
        help="Run all tests instead of stopping on first error. Useful for CI.")
    parser.add_argument('-p', '--persist', choices=['erase', 'noerase'],
        nargs='?', const='erase',
        help="Store disk image in a file.")
    parser.add_argument('-b', '--build', action='store_true',
        help="Only build the tests, do not execute.")
    parser.add_argument('-g', '--gdb', choices=['init', 'main', 'assert'],
        nargs='?', const='assert',
        help="Drop into gdb on test failure.")
    parser.add_argument('--no-internal', action='store_true',
        help="Don't run tests that require internal knowledge.")
    parser.add_argument('-n', '--normal', action='store_true',
        help="Run tests normally.")
    parser.add_argument('-r', '--reentrant', action='store_true',
        help="Run reentrant tests with simulated power-loss.")
    parser.add_argument('--valgrind', action='store_true',
        help="Run non-leaky tests under valgrind to check for memory leaks.")
    parser.add_argument('--exec', default=[], type=lambda e: e.split(),
        help="Run tests with another executable prefixed on the command line.")
    parser.add_argument('--disk',
        help="Specify a file to use for persistent/reentrant tests.")
    parser.add_argument('--coverage', type=lambda x: x if x else True,
        nargs='?', const='',
        help="Collect coverage information during testing. This uses lcov/gcov \
            to accumulate coverage information into *.info files. May also be \
            given a path to a *.info file to accumulate coverage info into.")
    parser.add_argument('--build-dir',
        help="Build relative to the specified directory instead of the \
            current directory.")

    sys.exit(main(**vars(parser.parse_args())))