"""
FIXME: https://github.com/twisted/twisted/issues/3843
This can be removed once t.persisted.aot is removed.
New code should not make use of this.

Tokenization help for Python programs.

vendored from
https://github.com/python/cpython/blob/6b825c1b8a14460641ca6f1647d83005c68199aa/Lib/tokenize.py
Licence: https://docs.python.org/3/license.html

tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens.  It decodes the bytes according to PEP-0263 for
determining source file encoding.

It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF).  It generates 5-tuples with these
members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly,
except that it produces COMMENT tokens for comments and gives type OP
for all operators.  Additionally, all token lists start with an ENCODING
token which tells you which encoding was used to decode the bytes
stream.
"""
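
# The docstring above describes the generator interface in prose; the sketch
# below (kept as a comment so that importing this module is unaffected, and
# not part of the vendored upstream source) shows what a call looks like in
# practice, assuming the module is importable as twisted.persisted._tokenize:
#
#     from io import BytesIO
#     from twisted.persisted._tokenize import tokenize
#
#     source = b"answer = 42\n"
#     for tok in tokenize(BytesIO(source).readline):
#         # Each item is a TokenInfo 5-tuple: type, string, start, end, line;
#         # the first one is always the ENCODING token ("utf-8" here).
#         print(tok.type, repr(tok.string), tok.start, tok.end)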
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__credits__ = (
    "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, "
    "Skip Montanaro, Raymond Hettinger, Trent Nelson, Michael Foord"
)

import collections
import functools
import itertools as _itertools
import re
import sys
from builtins import open as _builtin_open
from codecs import BOM_UTF8, lookup
from io import TextIOWrapper

from ._token import (
    AMPER, AMPEREQUAL, ASYNC, AT, ATEQUAL, AWAIT, CIRCUMFLEX, CIRCUMFLEXEQUAL,
    COLON, COLONEQUAL, COMMA, COMMENT, DEDENT, DOT, DOUBLESLASH,
    DOUBLESLASHEQUAL, DOUBLESTAR, DOUBLESTAREQUAL, ELLIPSIS, ENCODING,
    ENDMARKER, EQEQUAL, EQUAL, ERRORTOKEN, EXACT_TOKEN_TYPES, GREATER,
    GREATEREQUAL, INDENT, ISEOF, ISNONTERMINAL, ISTERMINAL, LBRACE, LEFTSHIFT,
    LEFTSHIFTEQUAL, LESS, LESSEQUAL, LPAR, LSQB, MINEQUAL, MINUS, N_TOKENS,
    NAME, NEWLINE, NL, NOTEQUAL, NT_OFFSET, NUMBER, OP, PERCENT, PERCENTEQUAL,
    PLUS, PLUSEQUAL, RARROW, RBRACE, RIGHTSHIFT, RIGHTSHIFTEQUAL, RPAR, RSQB,
    SEMI, SLASH, SLASHEQUAL, SOFT_KEYWORD, STAR, STAREQUAL, STRING, TILDE,
    TYPE_COMMENT, TYPE_IGNORE, VBAR, VBAREQUAL, tok_name,
)

# Every token constant imported from ._token is re-exported, together with
# the public helpers defined in this module.
__all__ = [
    "AMPER", "AMPEREQUAL", "ASYNC", "AT", "ATEQUAL", "AWAIT", "CIRCUMFLEX",
    "CIRCUMFLEXEQUAL", "COLON", "COLONEQUAL", "COMMA", "COMMENT", "DEDENT",
    "DOT", "DOUBLESLASH", "DOUBLESLASHEQUAL", "DOUBLESTAR", "DOUBLESTAREQUAL",
    "ELLIPSIS", "ENCODING", "ENDMARKER", "EQEQUAL", "EQUAL", "ERRORTOKEN",
    "EXACT_TOKEN_TYPES", "GREATER", "GREATEREQUAL", "INDENT", "ISEOF",
    "ISNONTERMINAL", "ISTERMINAL", "LBRACE", "LEFTSHIFT", "LEFTSHIFTEQUAL",
    "LESS", "LESSEQUAL", "LPAR", "LSQB", "MINEQUAL", "MINUS", "N_TOKENS",
    "NAME", "NEWLINE", "NL", "NOTEQUAL", "NT_OFFSET", "NUMBER", "OP",
    "PERCENT", "PERCENTEQUAL", "PLUS", "PLUSEQUAL", "RARROW", "RBRACE",
    "RIGHTSHIFT", "RIGHTSHIFTEQUAL", "RPAR", "RSQB", "SEMI", "SLASH",
    "SLASHEQUAL", "SOFT_KEYWORD", "STAR", "STAREQUAL", "STRING", "TILDE",
    "TYPE_COMMENT", "TYPE_IGNORE", "VBAR", "VBAREQUAL", "tok_name",
    "tokenize", "generate_tokens", "detect_encoding", "untokenize", "TokenInfo",
]

cookie_re = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)", re.ASCII)
blank_re = re.compile(rb"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII)


class TokenInfo(collections.namedtuple("TokenInfo", "type string start end line")):
    def __repr__(self):
        annotated_type = "%d (%s)" % (self.type, tok_name[self.type])
        return "TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)" % (
            self._replace(type=annotated_type)
        )

    @property
    def exact_type(self):
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type


def group(*choices):
    return "(" + "|".join(choices) + ")"


def any(*choices):
    return group(*choices) + "*"


def maybe(*choices):
    return group(*choices) + "?"


# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r"[ \f\t]*"
Comment = r"#[^\r\n]*"
Ignore = Whitespace + any(r"\\\r?\n" + Whitespace) + maybe(Comment)
Name = r"\w+"

Hexnumber = r"0[xX](?:_?[0-9a-fA-F])+"
Binnumber = r"0[bB](?:_?[01])+"
Octnumber = r"0[oO](?:_?[0-7])+"
Decnumber = r"(?:0(?:_?0)*|[1-9](?:_?[0-9])*)"
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r"[eE][-+]?[0-9](?:_?[0-9])*"
Pointfloat = group(
    r"[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?", r"\.[0-9](?:_?[0-9])*"
) + maybe(Exponent)
Expfloat = r"[0-9](?:_?[0-9])*" + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r"[0-9](?:_?[0-9])*[jJ]", Floatnumber + r"[jJ]")
Number = group(Imagnumber, Floatnumber, Intnumber)


def _all_string_prefixes():
    # The valid string prefixes.  Only contains the lower-case versions and
    # no permutations ('fr' but not 'rf'); the permutations and upper/lower
    # combinations are generated below.
    _valid_string_prefixes = ["b", "r", "u", "f", "br", "fr"]
    result = {""}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create upper and lower versions of each character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add("".join(u))
    return result


@functools.lru_cache
def _compile(expr):
    return re.compile(expr, re.UNICODE)


# Note that since _all_string_prefixes includes the empty string, StringPrefix
# can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(
    StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
    StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"',
)

# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get interpreted as two instances
# of =.
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r"\r?\n", Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(
    StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r"\\\r?\n"),
    StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r"\\\r?\n"),
)
PseudoExtras = group(r"\\\r?\n|\Z", Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# For a given string prefix plus quotes, endpats maps it to a regex to match
# the remainder of that string.  _prefix can be empty, for a normal single or
# triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
    endpats[_prefix + "'"] = Single
    endpats[_prefix + '"'] = Double
    endpats[_prefix + "'''"] = Single3
    endpats[_prefix + '"""'] = Double3
del _prefix

# A set of all of the single and triple quoted string prefixes, including the
# opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
    for u in (t + '"', t + "'"):
        single_quoted.add(u)
    for u in (t + '"""', t + "'''"):
        triple_quoted.add(u)
del t, u

tabsize = 8


class TokenError(Exception):
    pass


class StopTokenizing(Exception):
    pass


class Untokenizer:
    def __init__(self):
        self.tokens = []
        self.prev_row = 1
        self.prev_col = 0
        self.encoding = None

    def add_whitespace(self, start):
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError(
                "start ({},{}) precedes previous end ({},{})".format(
                    row, col, self.prev_row, self.prev_col
                )
            )
        row_offset = row - self.prev_row
        if row_offset:
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in _itertools.chain([token], iterable):
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue

            if toknum in (NAME, NUMBER):
                tokval += " "

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = " " + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
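
# Illustrative sketch (a comment only, not part of the vendored upstream
# source): the exact_type property defined on TokenInfo above refines the
# generic OP type into the specific operator token, while non-OP tokens report
# their own type unchanged.
#
#     info = TokenInfo(OP, "+", (1, 2), (1, 3), "a + b\n")
#     assert info.exact_type == PLUS
#     assert TokenInfo(NAME, "a", (1, 0), (1, 1), "a + b\n").exact_type == NAME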
def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.

    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value.  If
    only two tokens are passed, the resulting output is poor.

    Round-trip invariant for full input:
        Untokenized source will match input source exactly

    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        out = out.encode(ut.encoding)
    return out


def _get_normal_name(orig_enc):
    """Imitates get_normal_name in tokenizer.c."""
    # Only care about the first 12 characters.
    enc = orig_enc[:12].lower().replace("_", "-")
    if enc == "utf-8" or enc.startswith("utf-8-"):
        return "utf-8"
    if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or enc.startswith(
        ("latin-1-", "iso-8859-1-", "iso-latin-1-")
    ):
        return "iso-8859-1"
    return orig_enc


def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file.  It requires one argument,
    readline, in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263.  If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised.  If the encoding cookie is an
    invalid charset, raise a SyntaxError.  Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = "utf-8"

    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b""

    def find_cookie(line):
        try:
            # Decode as UTF-8.  Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode("utf-8")
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = "{} for {!r}".format(msg, filename)
            raise SyntaxError(msg)

        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename, encoding)
            raise SyntaxError(msg)

        if bom_found:
            if encoding != "utf-8":
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = "encoding problem: utf-8"
                else:
                    msg = "encoding problem for {!r}: utf-8".format(filename)
                raise SyntaxError(msg)
            encoding += "-sig"
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        bom_found = True
        first = first[3:]
        default = "utf-8-sig"
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    if not blank_re.match(first):
        return default, [first]

    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]


def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    buffer = _builtin_open(filename, "rb")
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = "r"
        return text
    except BaseException:
        buffer.close()
        raise
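
# Illustrative sketch (a comment only, not part of the vendored upstream
# source): detect_encoding() reads at most two lines, normalises the PEP 263
# cookie via _get_normal_name(), and hands back the lines it consumed so that
# callers can replay them before reading further.
#
#     from io import BytesIO
#
#     buf = BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
#     encoding, consumed = detect_encoding(buf.readline)
#     assert encoding == "iso-8859-1"      # "latin-1" is normalised
#     assert consumed == [b"# -*- coding: latin-1 -*-\n"]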
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    physical line.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    encoding, consumed = detect_encoding(readline)
    empty = _itertools.repeat(b"")
    rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
    return _tokenize(rl_gen.__next__, encoding)


def _tokenize(readline, encoding):
    lnum = parenlev = continued = 0
    numchars = "0123456789"
    contstr, needcont = "", 0
    contline = None
    indents = [0]

    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), "")
    last_line = b""
    line = b""
    while True:  # loop over lines in stream
        try:
            # readline signals EOF with an empty string rather than raising,
            # so remember the previous physical line for the implicit-NEWLINE
            # check at the end.
            last_line = line
            line = readline()
        except StopIteration:
            line = b""

        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)

        if contstr:  # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(
                    STRING, contstr + line[:end], strstart, (lnum, end), contline + line
                )
                contstr, needcont = "", 0
                contline = None
            elif needcont and line[-2:] != "\\\n" and line[-3:] != "\\\r\n":
                yield TokenInfo(
                    ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline
                )
                contstr = ""
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line:
                break
            column = 0
            while pos < max:  # measure leading whitespace
                if line[pos] == " ":
                    column += 1
                elif line[pos] == "\t":
                    column = (column // tabsize + 1) * tabsize
                elif line[pos] == "\f":
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in "#\r\n":  # skip comments or blank lines
                if line[pos] == "#":
                    comment_token = line[pos:].rstrip("\r\n")
                    yield TokenInfo(
                        COMMENT,
                        comment_token,
                        (lnum, pos),
                        (lnum, pos + len(comment_token)),
                        line,
                    )
                    pos += len(comment_token)

                yield TokenInfo(NL, line[pos:], (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:  # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line),
                    )
                indents = indents[:-1]

                yield TokenInfo(DEDENT, "", (lnum, pos), (lnum, pos), line)

        else:  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:  # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if initial in numchars or (
                    initial == "." and token != "." and token != "..."
                ):  # ordinary number
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in "\r\n":
                    if parenlev > 0:
                        yield TokenInfo(NL, token, spos, epos, line)
                    else:
                        yield TokenInfo(NEWLINE, token, spos, epos, line)

                elif initial == "#":
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)

                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:  # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)  # multiple lines
                        contstr = line[start:]
                        contline = line
                        break

                # Check up to the first 3 chars of the token to see if they
                # are in the single_quoted set; if so, they start a string.
                # Three chars covers prefixes such as "rb'".  Note that single
                # quote checking must come after triple quote checking above.
                elif (
                    initial in single_quoted
                    or token[:2] in single_quoted
                    or token[:3] in single_quoted
                ):
                    if token[-1] == "\n":  # continued string
                        strstart = (lnum, start)
                        endprog = _compile(
                            endpats.get(initial)
                            or endpats.get(token[1])
                            or endpats.get(token[2])
                        )
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)

                elif initial.isidentifier():  # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == "\\":  # continued stmt
                    continued = 1
                else:
                    if initial in "([{":  # parentheses
                        parenlev += 1
                    elif initial in ")]}":
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(
                    ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line
                )
                pos += 1

    # Add an implicit NEWLINE if the input doesn't end in one
    if (
        last_line
        and last_line[-1] not in "\r\n"
        and not last_line.strip().startswith("#")
    ):
        yield TokenInfo(
            NEWLINE, "", (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), ""
        )
    for indent in indents[1:]:  # pop remaining indent levels
        yield TokenInfo(DEDENT, "", (lnum, 0), (lnum, 0), "")
    yield TokenInfo(ENDMARKER, "", (lnum, 0), (lnum, 0), "")


def generate_tokens(readline):
    """Tokenize a source reading Python code as unicode strings.

    This has the same API as tokenize(), except that it expects the *readline*
    callable to return str objects instead of bytes.
    """
    return _tokenize(readline, None)
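
# Illustrative sketch (a comment only, not part of the vendored upstream
# source): generate_tokens() is the str-based twin of tokenize(), so the
# readline callable must yield str lines and no ENCODING token is produced.
#
#     from io import StringIO
#
#     toks = list(generate_tokens(StringIO("x = 1\n").readline))
#     # toks[0] is the NAME token for "x"; the stream ends with NEWLINE
#     # followed by ENDMARKER.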
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        sys.stderr.write(message)
        sys.stderr.write("\n")

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog="python -m tokenize")
    parser.add_argument(
        dest="filename",
        nargs="?",
        metavar="filename.py",
        help="the file to tokenize; defaults to stdin",
    )
    parser.add_argument(
        "-e",
        "--exact",
        dest="exact",
        action="store_true",
        help="display token names using the exact type",
    )
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, "rb") as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print(
                "%-20s%-15s%-15r"
                % (token_range, tok_name[token_type], token.string)
            )
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise


if __name__ == "__main__":
    main()
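
# Illustrative sketch (a comment only, not part of the vendored upstream
# source): the round-trip invariant documented on untokenize() above, written
# out as runnable code.
#
#     from io import BytesIO
#
#     source = b"value = (1 +\n         2)\n"
#     t1 = [tok[:2] for tok in tokenize(BytesIO(source).readline)]
#     newcode = untokenize(t1)   # bytes, encoded per the ENCODING token
#     t2 = [tok[:2] for tok in tokenize(BytesIO(newcode).readline)]
#     assert t1 == t2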