
md4c


spec_tests.py (5865B)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-

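# Runs the examples from a spec file (spec.txt by default) against a
# Markdown-to-HTML program and reports pass/fail/error counts; the process
# exit code is the number of failures plus errors.
#
# Illustrative invocations (added note; the renderer path is a placeholder):
#
#   ./spec_tests.py --spec spec.txt --program ./path/to/renderer
#   ./spec_tests.py --pattern 'Emphasis' --number 42
#   ./spec_tests.py --dump-tests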
import sys
from difflib import unified_diff
import argparse
import re
import json
from cmark import CMark
from normalize import normalize_html

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Run cmark tests.')
    parser.add_argument('-p', '--program', dest='program', nargs='?', default=None,
            help='program to test')
    parser.add_argument('-s', '--spec', dest='spec', nargs='?', default='spec.txt',
            help='path to spec')
    parser.add_argument('-P', '--pattern', dest='pattern', nargs='?',
            default=None, help='limit to sections matching regex pattern')
    parser.add_argument('--library-dir', dest='library_dir', nargs='?',
            default=None, help='directory containing dynamic library')
    parser.add_argument('--no-normalize', dest='normalize',
            action='store_const', const=False, default=True,
            help='do not normalize HTML')
    parser.add_argument('-d', '--dump-tests', dest='dump_tests',
            action='store_const', const=True, default=False,
            help='dump tests in JSON format')
    parser.add_argument('--debug-normalization', dest='debug_normalization',
            action='store_const', const=True,
            default=False, help='filter stdin through normalizer for testing')
    parser.add_argument('-n', '--number', type=int, default=None,
            help='only consider the test with the given number')
    args = parser.parse_args(sys.argv[1:])

def out(str):
    sys.stdout.buffer.write(str.encode('utf-8'))

def print_test_header(headertext, example_number, start_line, end_line):
    out("Example %d (lines %d-%d) %s\n" % (example_number,start_line,end_line,headertext))

def do_test(test, normalize, result_counts):
    # cmark.to_html is expected to return [return_code, rendered_html, stderr_output].
    [retcode, actual_html, err] = cmark.to_html(test['markdown'])
    if retcode == 0:
        expected_html = test['html']
        unicode_error = None
        if normalize:
            try:
                passed = normalize_html(actual_html) == normalize_html(expected_html)
            except UnicodeDecodeError as e:
                unicode_error = e
                passed = False
        else:
            passed = actual_html == expected_html
        if passed:
            result_counts['pass'] += 1
        else:
            # On failure, print the test header, the Markdown input, and a
            # unified diff between expected and actual HTML.
            print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
            out(test['markdown'] + '\n')
            if unicode_error:
                out("Unicode error: " + str(unicode_error) + '\n')
                out("Expected: " + repr(expected_html) + '\n')
                out("Got:      " + repr(actual_html) + '\n')
            else:
                expected_html_lines = expected_html.splitlines(True)
                actual_html_lines = actual_html.splitlines(True)
                for diffline in unified_diff(expected_html_lines, actual_html_lines,
                                "expected HTML", "actual HTML"):
                    out(diffline)
            out('\n')
            result_counts['fail'] += 1
    else:
        print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
        out("program returned error code %d\n" % retcode)
        sys.stdout.buffer.write(err)
        result_counts['error'] += 1

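# Spec format expected by get_tests() (added note, inferred from the parser):
# each example is a fence of 32 backticks followed by " example", then the
# Markdown input, a line containing only ".", the expected HTML output, and a
# closing fence of 32 backticks. "→" stands for a literal tab, and the section
# name is taken from the most recent "#"-style heading before the example.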
def get_tests(specfile):
    line_number = 0
    start_line = 0
    end_line = 0
    example_number = 0
    markdown_lines = []
    html_lines = []
    state = 0  # 0 regular text, 1 markdown example, 2 html output
    headertext = ''
    tests = []

    header_re = re.compile('#+ ')

    with open(specfile, 'r', encoding='utf-8', newline='\n') as specf:
        for line in specf:
            line_number = line_number + 1
            l = line.strip()
            if l == "`" * 32 + " example":
                state = 1
            elif state == 2 and l == "`" * 32:
                # Closing fence: record the finished example.
                state = 0
                example_number = example_number + 1
                end_line = line_number
                tests.append({
                    "markdown":''.join(markdown_lines).replace('→',"\t"),
                    "html":''.join(html_lines).replace('→',"\t"),
                    "example": example_number,
                    "start_line": start_line,
                    "end_line": end_line,
                    "section": headertext})
                start_line = 0
                markdown_lines = []
                html_lines = []
            elif l == ".":
                # A lone "." separates the Markdown input from the expected HTML.
                state = 2
            elif state == 1:
                if start_line == 0:
                    start_line = line_number - 1
                markdown_lines.append(line)
            elif state == 2:
                html_lines.append(line)
            elif state == 0 and re.match(header_re, line):
                # Heading outside an example: remember it as the section name.
                headertext = header_re.sub('', line).strip()
    return tests

if __name__ == "__main__":
    if args.debug_normalization:
        out(normalize_html(sys.stdin.read()))
        exit(0)

    all_tests = get_tests(args.spec)
    if args.pattern:
        pattern_re = re.compile(args.pattern, re.IGNORECASE)
    else:
        pattern_re = re.compile('.')
    tests = [ test for test in all_tests if re.search(pattern_re, test['section']) and (not args.number or test['example'] == args.number) ]
    if args.dump_tests:
        out(json.dumps(tests, ensure_ascii=False, indent=2))
        exit(0)
    else:
        skipped = len(all_tests) - len(tests)
        cmark = CMark(prog=args.program, library_dir=args.library_dir)
        result_counts = {'pass': 0, 'fail': 0, 'error': 0, 'skip': skipped}
        for test in tests:
            do_test(test, args.normalize, result_counts)
        out("{pass} passed, {fail} failed, {error} errored, {skip} skipped\n".format(**result_counts))
        exit(result_counts['fail'] + result_counts['error'])
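
A minimal sketch of how the example format is consumed, not part of spec_tests.py: it writes a one-example spec snippet to a temporary file and parses it with get_tests(). It assumes this script can be imported as spec_tests, which also requires cmark.py and normalize.py from the same directory to be importable; the snippet contents and file handling are purely illustrative.

import os
import tempfile

from spec_tests import get_tests  # assumes the test directory is on sys.path

FENCE = "`" * 32

# One example in the format get_tests() parses: fence + " example",
# Markdown input, ".", expected HTML, closing fence.
SPEC_SNIPPET = "\n".join([
    "## Tabs",
    "",
    FENCE + " example",
    "→foo",
    ".",
    "<pre><code>foo",
    "</code></pre>",
    FENCE,
    "",
])

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False,
                                 encoding="utf-8") as tmp:
    tmp.write(SPEC_SNIPPET)
    spec_path = tmp.name

try:
    tests = get_tests(spec_path)
finally:
    os.unlink(spec_path)

assert tests[0]["section"] == "Tabs"
assert tests[0]["markdown"] == "\tfoo\n"  # "→" becomes a real tab
assert tests[0]["example"] == 1
print(tests[0])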