| author | John MacFarlane <jgm@berkeley.edu> | 2015-07-08 17:49:21 -0700 |
|---|---|---|
| committer | John MacFarlane <jgm@berkeley.edu> | 2015-07-08 17:50:12 -0700 |
| commit | e6acb7e7a43ac64d83c410b9f83c13743b934d6f (patch) | |
| tree | fb2d5a0488c463ea58c46260346d61d49ef8c32a | |
| parent | 7bd74a60747c2c49f5bd498707be5c4a3313842e (diff) | |
spec_tests.py: use sys.stdout.buffer.write instead of print throughout.
Mixing them didn't work well; results were out of order.
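For context, here is a minimal sketch (not part of the commit) of the ordering problem described above: print() writes through the buffered text wrapper sys.stdout, while sys.stdout.buffer.write() goes straight to the underlying binary buffer, so text printed earlier can be flushed after bytes written directly to the buffer.

```python
import sys

# Mixing the two write paths: print() goes through the text wrapper
# (sys.stdout), which keeps its own pending buffer; buffer.write() bypasses
# it and appends directly to the underlying binary stream.
print("first (via print)", end="")
sys.stdout.buffer.write(b"second (via buffer.write)\n")
print(" third (via print)")
# Depending on how stdout is buffered, "second" can reach the output before
# "first" -- the reordering the commit message describes.

# The remedy in the patch is to funnel every write through one helper
# (shape assumed from the diff below):
def out(s):
    sys.stdout.buffer.write(s.encode('utf-8'))

out("first\n")
out("second\n")
out("third\n")
```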
-rwxr-xr-x | test/spec_tests.py | 27
1 file changed, 15 insertions, 12 deletions
diff --git a/test/spec_tests.py b/test/spec_tests.py
index 11b271c..182368b 100755
--- a/test/spec_tests.py
+++ b/test/spec_tests.py
@@ -32,8 +32,11 @@ if __name__ == "__main__":
             help='only consider the test with the given number')
     args = parser.parse_args(sys.argv[1:])
 
+def out(str):
+    sys.stdout.buffer.write(str.encode('utf-8'))
+
 def print_test_header(headertext, example_number, start_line, end_line):
-    print("Example %d (lines %d-%d) %s" % (example_number,start_line,end_line,headertext))
+    out("Example %d (lines %d-%d) %s\n" % (example_number,start_line,end_line,headertext))
 
 def do_test(test, normalize, result_counts):
     [retcode, actual_html, err] = cmark.to_html(test['markdown'])
@@ -52,23 +55,23 @@ def do_test(test, normalize, result_counts):
             result_counts['pass'] += 1
         else:
             print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
-            sys.stdout.buffer.write(test['markdown'].encode('utf-8'))
+            out(test['markdown'] + '\n')
             if unicode_error:
-                print("Unicode error: " + str(unicode_error))
-                print("Expected: " + repr(expected_html))
-                print("Got: " + repr(actual_html))
+                out("Unicode error: " + str(unicode_error) + '\n')
+                out("Expected: " + repr(expected_html) + '\n')
+                out("Got: " + repr(actual_html) + '\n')
             else:
                 expected_html_lines = expected_html.splitlines(True)
                 actual_html_lines = actual_html.splitlines(True)
                 for diffline in unified_diff(expected_html_lines, actual_html_lines,
                                 "expected HTML", "actual HTML"):
-                    sys.stdout.buffer.write(diffline.encode('utf-8'))
-                sys.stdout.write('\n')
+                    out(diffline)
+                out('\n')
             result_counts['fail'] += 1
     else:
         print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
-        print("program returned error code %d" % retcode)
-        print(err)
+        out("program returned error code %d\n" % retcode)
+        out(err + '\n')
         result_counts['error'] += 1
 
 def get_tests(specfile):
@@ -114,7 +117,7 @@ def get_tests(specfile):
 
 if __name__ == "__main__":
     if args.debug_normalization:
-        print(normalize_html(sys.stdin.read()))
+        out(normalize_html(sys.stdin.read()))
         exit(0)
 
     all_tests = get_tests(args.spec)
@@ -124,7 +127,7 @@ if __name__ == "__main__":
         pattern_re = re.compile('.')
     tests = [ test for test in all_tests if re.search(pattern_re, test['section']) and (not args.number or test['example'] == args.number) ]
     if args.dump_tests:
-        sys.stdout.buffer.write(json.dumps(tests, ensure_ascii=False, indent=2).encode('utf-8'))
+        out(json.dumps(tests, ensure_ascii=False, indent=2))
         exit(0)
     else:
         skipped = len(all_tests) - len(tests)
@@ -132,7 +135,7 @@ if __name__ == "__main__":
     result_counts = {'pass': 0, 'fail': 0, 'error': 0, 'skip': skipped}
     for test in tests:
         do_test(test, args.normalize, result_counts)
-    print("{pass} passed, {fail} failed, {error} errored, {skip} skipped".format(**result_counts))
+    out("{pass} passed, {fail} failed, {error} errored, {skip} skipped\n".format(**result_counts))
     if result_counts['fail'] == 0 and result_counts['error'] == 0:
         exit(0)
     else:
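Taken together, the patch leaves the script with a single write path. A small, self-contained sketch of that pattern follows; the data is purely illustrative, not the spec's real test records.

```python
import sys
import json

def out(s):
    # Same shape as the helper added by the patch: always encode to UTF-8
    # and write to the binary buffer; never mix in print().
    sys.stdout.buffer.write(s.encode('utf-8'))

# Hypothetical result counts, just to exercise the two output styles the
# script uses (JSON dumps and the final summary line).
result_counts = {'pass': 3, 'fail': 1, 'error': 0, 'skip': 0}
out(json.dumps(result_counts, ensure_ascii=False, indent=2) + '\n')
out("{pass} passed, {fail} failed, {error} errored, {skip} skipped\n".format(**result_counts))
```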