Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import tokenize
import sys
import re

# Splits a string literal's source text into (prefixes, quote run, body),
# e.g. r'x'  ->  ('r', "'", 'x').  re.S lets the body span newlines for
# triple-quoted strings; the \2 backreference requires matching closing
# quotes.  NOTE(review): only lowercase r/u/b prefixes are matched --
# uppercase prefixes or f-strings make search() return None; confirm
# callers handle that case.
re_string = re.compile(r'^([rub]*)([\'"]{1,3})(.*)\2$', re.S)
class Converter:
    """Rewrite a Python source file in place, normalising string quoting.

    Docstrings become triple-double-quoted, other multi-line strings
    become triple-single-quoted, and ordinary one-line strings are
    re-emitted via repr() (which prefers single quotes).  All other
    tokens, spacing and line structure are preserved.
    """

    # Splits a literal into (prefixes, quote run, body); kept on the class
    # so the converter is self-contained.  Only lowercase r/u/b prefixes
    # are recognised -- anything else (f-strings, uppercase prefixes)
    # fails to match and is left untouched by tokeneater().
    _re_string = re.compile(r'^([rub]*)([\'"]{1,3})(.*)\2$', re.S)

    def __init__(self, filename):
        self.filename = filename        # path of the file to rewrite
        self.out = []                   # rewritten source fragments, joined at the end
        self.previous_end = None        # (row, col) just past the previous token
        self.def_or_class = False       # currently inside a def/class header?
        self.docstring_expected = True  # is the next STRING token a docstring?

    def tokeneater(self, kind, token, start, end, line):
        """Consume one token: re-insert the horizontal gap before it,
        rewrite STRING tokens to the preferred quoting, and track whether
        the next string will sit in docstring position."""
        # Gap only matters within a line; a new line starts at column 0.
        if self.previous_end and self.previous_end[0] == start[0]:
            offset = self.previous_end[1]
        else:
            offset = 0
        self.out.append(' ' * (start[1] - offset))
        if kind == tokenize.STRING:
            match = self._re_string.search(token)
            # Unrecognised literal forms (f-strings, uppercase prefixes)
            # are passed through unchanged instead of crashing on
            # .groups() of None.
            if match is not None:
                prefixes, quotes, content = match.groups()
                if self.docstring_expected:
                    token = '{}"""{}"""'.format(prefixes, content)
                elif start[0] != end[0]:
                    # Multi-line, non-docstring string.
                    token = "{}'''{}'''".format(prefixes, content)
                else:
                    # Token text comes straight from tokenize, so this
                    # eval only ever sees a string literal.
                    s = eval(token)
                    if 'r' in prefixes:
                        # Keep the raw prefix if possible, choosing the
                        # quote character that needs no escaping (raw
                        # strings cannot escape quotes).
                        if "'" in s and '"' not in s:
                            token = '{}"{}"'.format(prefixes, s)
                        else:
                            token = "{}'{}'".format(prefixes, s)
                        try:
                            eval(token)
                        except SyntaxError:
                            # No raw spelling represents s; fall back to
                            # a plain repr and drop the prefix.
                            token = repr(s)
                    else:
                        token = repr(s)
        # Any significant token means the docstring slot has passed.
        if kind not in {tokenize.NEWLINE, tokenize.NL,
                        tokenize.INDENT, tokenize.COMMENT}:
            self.docstring_expected = False
        if self.def_or_class and kind == tokenize.OP and token == ':':
            # The def/class header just closed: a docstring may follow.
            self.def_or_class = False
            self.docstring_expected = True
        if kind == tokenize.NAME and token in {'def', 'class'}:
            self.def_or_class = True
        self.out.append(token)
        self.previous_end = end

    def convert(self):
        """Tokenize self.filename and write the rewritten source back.

        Ported to the Python 3 tokenize API: tokenize.tokenize() takes
        only a readline callable and yields tokens (the Python 2
        tokeneater-callback form no longer exists).
        """
        with open(self.filename, 'rb') as file:
            tokens = list(tokenize.tokenize(file.readline))
        encoding = 'utf-8'
        for tok in tokens:
            if tok.type == tokenize.ENCODING:
                # Synthetic bookkeeping token carrying the detected source
                # encoding; it has no source text, so keep it out of the
                # output but reuse the encoding when writing back.
                encoding = tok.string
                continue
            self.tokeneater(tok.type, tok.string, tok.start, tok.end, tok.line)
        # newline='' so the newline characters inside NEWLINE/NL tokens
        # are written back verbatim, untranslated.
        with open(self.filename, 'w', encoding=encoding, newline='') as file:
            file.write(''.join(self.out))
if __name__ == '__main__':
    # Normalise string quoting, in place, in every file named on the
    # command line.
    for path in sys.argv[1:]:
        converter = Converter(path)
        converter.convert()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement