Mercurial > p > roundup > code
comparison roundup/cgi/cgitb.py @ 6983:3129d73e8535
flake8 plus tests for cgitb.py
flake8 fixes for cgitb.py.
Also set up tests for these functions in the unit test suite.
Need tests before making some other changes in cgitb.py.
| author | John Rouillard <rouilj@ieee.org> |
|---|---|
| date | Wed, 14 Sep 2022 22:35:23 -0400 |
| parents | 70e6b053193b |
| children | 57f34b0b912c |
comparison
equal
deleted
inserted
replaced
| 6982:e605ddb45701 | 6983:3129d73e8535 |
|---|---|
| 5 """Extended CGI traceback handler by Ka-Ping Yee, <ping@lfw.org>. | 5 """Extended CGI traceback handler by Ka-Ping Yee, <ping@lfw.org>. |
| 6 """ | 6 """ |
| 7 from __future__ import print_function | 7 from __future__ import print_function |
| 8 __docformat__ = 'restructuredtext' | 8 __docformat__ = 'restructuredtext' |
| 9 | 9 |
| 10 import sys, os, keyword, linecache, tokenize, inspect | 10 import inspect |
| 11 import pydoc, traceback | 11 import keyword |
| 12 import linecache | |
| 13 import os | |
| 14 import pydoc | |
| 15 import sys | |
| 16 import tokenize | |
| 17 import traceback | |
| 12 | 18 |
| 13 from roundup.anypy.html import html_escape | 19 from roundup.anypy.html import html_escape |
| 14 | 20 from roundup.anypy.strings import s2b |
| 15 from roundup.cgi import TranslationService | 21 from roundup.cgi import TranslationService |
| 16 from roundup.anypy.strings import s2b | |
| 17 | 22 |
| 18 | 23 |
| 19 def get_translator(i18n=None): | 24 def get_translator(i18n=None): |
| 20 """Return message translation function (gettext) | 25 """Return message translation function (gettext) |
| 21 | 26 |
| 43 | 48 |
| 44 def niceDict(indent, dict): | 49 def niceDict(indent, dict): |
| 45 l = [] | 50 l = [] |
| 46 for k in sorted(dict): | 51 for k in sorted(dict): |
| 47 v = dict[k] | 52 v = dict[k] |
| 48 l.append('<tr><td><strong>%s</strong></td><td>%s</td></tr>' % (k, | 53 l.append('<tr><td><strong>%s</strong></td><td>%s</td></tr>' % ( |
| 49 html_escape(repr(v)))) | 54 k, html_escape(repr(v)))) |
| 50 return '\n'.join(l) | 55 return '\n'.join(l) |
| 51 | 56 |
| 52 | 57 |
| 53 def pt_html(context=5, i18n=None): | 58 def pt_html(context=5, i18n=None): |
| 54 _ = get_translator(i18n) | 59 _ = get_translator(i18n) |
| 70 s = [] | 75 s = [] |
| 71 for name, info in ti.path: | 76 for name, info in ti.path: |
| 72 s.append(_('<li>"%(name)s" (%(info)s)</li>') | 77 s.append(_('<li>"%(name)s" (%(info)s)</li>') |
| 73 % {'name': name, 'info': esc(repr(info))}) | 78 % {'name': name, 'info': esc(repr(info))}) |
| 74 s = '\n'.join(s) | 79 s = '\n'.join(s) |
| 75 l.append(_('<li>Looking for "%(name)s", ' | 80 l.append(_( |
| 76 'current path:<ol>%(path)s</ol></li>' | 81 '<li>Looking for "%(name)s", ' |
| 77 ) % {'name': ti.name, 'path': s}) | 82 'current path:<ol>%(path)s</ol></li>' |
| 83 ) % {'name': ti.name, 'path': s}) | |
| 78 else: | 84 else: |
| 79 l.append(_('<li>In %s</li>') % esc(str(ti))) | 85 l.append(_('<li>In %s</li>') % esc(str(ti))) |
| 80 if '__traceback_supplement__' in locals: | 86 if '__traceback_supplement__' in locals: |
| 81 ts = locals['__traceback_supplement__'] | 87 ts = locals['__traceback_supplement__'] |
| 82 if len(ts) == 2: | 88 if len(ts) == 2: |
| 99 ''') % { | 105 ''') % { |
| 100 'info': info, | 106 'info': info, |
| 101 'line': context.position[0], | 107 'line': context.position[0], |
| 102 'globals': niceDict(' ', context.global_vars), | 108 'globals': niceDict(' ', context.global_vars), |
| 103 'locals': niceDict(' ', context.local_vars) | 109 'locals': niceDict(' ', context.local_vars) |
| 104 }) | 110 }) |
| 105 | 111 |
| 106 l.append(''' | 112 l.append(''' |
| 107 </ol> | 113 </ol> |
| 108 <table style="font-size: 80%%; color: gray"> | 114 <table style="font-size: 80%%; color: gray"> |
| 109 <tr><th class="header" align="left">%s</th></tr> | 115 <tr><th class="header" align="left">%s</th></tr> |
| 162 | 168 |
| 163 def tokeneater(type, token, start, end, line, names=names): | 169 def tokeneater(type, token, start, end, line, names=names): |
| 164 if type == tokenize.NAME and token not in keyword.kwlist: | 170 if type == tokenize.NAME and token not in keyword.kwlist: |
| 165 if token not in names: | 171 if token not in names: |
| 166 names.append(token) | 172 names.append(token) |
| 167 if type == tokenize.NEWLINE: raise IndexError | 173 if type == tokenize.NEWLINE: raise IndexError # noqa: E701 |
| 168 | 174 |
| 169 def linereader(file=file, lnum=[lnum]): | 175 def linereader(file=file, lnum=[lnum]): |
| 170 line = s2b(linecache.getline(file, lnum[0])) | 176 line = s2b(linecache.getline(file, lnum[0])) |
| 171 lnum[0] = lnum[0] + 1 | 177 lnum[0] = lnum[0] + 1 |
| 172 return line | 178 return line |
