# -*- coding: utf-8 -*-
"""
    jinja2.lexer
    ~~~~~~~~~~~~

    This module implements a Jinja / Python combination lexer. The
    `Lexer` class provided by this module is used to do some preprocessing
    for Jinja.

    On the one hand it filters out invalid operators like the bitshift
    operators we don't allow in templates. On the other hand it separates
    template code and python code in expressions.

    :copyright: (c) 2017 by the Jinja Team.
    :license: BSD, see LICENSE for more details.
    N)deque)
itemgetter)implements_iteratorintern	iteritems	text_type)TemplateSyntaxError)LRUCache2   z\s+z7('([^'\\]*(?:\\.[^'\\]*)*)'|"([^"\\]*(?:\\.[^"\\]*)*)")z\d+u   fööz	<unknown>evalz[a-zA-Z_][a-zA-Z0-9_]*F)_identifierz[\w{0}]+Tzjinja2._identifierz(?<!\.)\d+\.\d+z(\r\n|\r|\n)addZassignZcolonZcommaZdivdoteqfloordivgtZgteqZlbraceZlbracketZlparenltZlteqmodmulnepipepowZrbraceZrbracketZrparenZ	semicolonsubtildeZ
whitespacefloatintegernamestringoperatorblock_begin	block_endZvariable_beginvariable_end	raw_beginraw_endZcomment_beginZcomment_endcommentlinestatement_beginlinestatement_endZlinecomment_beginZlinecomment_endlinecommentdatainitialeof)+-/z//*%z**~[](){}z==z!=>z>=<z<==.:|,;c                 C   s   g | ]\}}||fqS  r?   ).0kvr?   r?   ./usr/lib/python3/dist-packages/jinja2/lexer.py
<listcomp>   s     rD   zoperators droppedz(%s)r<   c                 c   s   | ]}t |V  qd S N)reescaper@   xr?   r?   rC   	<genexpr>   s     rJ   c                 C   s
   t |  S rE   )lenrI   r?   r?   rC   <lambda>       rM   )keyc                 C   sL   | t krt |  S tdtdtdtdtdtdtdtdt	dt
d	td
tdi| | S )Nzbegin of commentzend of commentr$   zbegin of statement blockzend of statement blockzbegin of print statementzend of print statementzbegin of line statementzend of line statementztemplate data / textzend of template)reverse_operatorsTOKEN_COMMENT_BEGINTOKEN_COMMENT_ENDTOKEN_COMMENTTOKEN_LINECOMMENTTOKEN_BLOCK_BEGINTOKEN_BLOCK_ENDTOKEN_VARIABLE_BEGINTOKEN_VARIABLE_ENDTOKEN_LINESTATEMENT_BEGINTOKEN_LINESTATEMENT_END
TOKEN_DATA	TOKEN_EOFget)
token_typer?   r?   rC   _describe_token_type   s<                 r_   c                 C   s   | j dkr| jS t| j S )z#Returns a description of the token.r   )typevaluer_   )tokenr?   r?   rC   describe_token   s    
rc   c                 C   s2   d| kr&|  dd\}}|dkr*|S n| }t|S )z0Like `describe_token` but for token expressions.r;      r   )splitr_   )exprr`   ra   r?   r?   rC   describe_token_expr   s    rg   c                 C   s   t t| S )zsCount the number of newline characters in the string.  This is
    useful for extensions that filter a stream.
    )rK   
newline_refindall)ra   r?   r?   rC   count_newlines   s    rj   c                 C   s   t j}t| jd|| jft| jd|| jft| jd|| jfg}| jdk	rp|t| jdd|| j f | jdk	r|t| jdd|| j f d	d
 t	|ddD S )zACompiles all the rules from the environment into a list of rules.r$   blockvariableNZlinestatementz	^[ \t\v]*r'   z(?:^|(?<=\S))[^\S\r\n]*c                 S   s   g | ]}|d d qS )rd   Nr?   rH   r?   r?   rC   rD      s     z!compile_rules.<locals>.<listcomp>T)reverse)
rF   rG   rK   comment_start_stringblock_start_stringvariable_start_stringline_statement_prefixappendline_comment_prefixsorted)environmenterulesr?   r?   rC   compile_rules   s,    


	

rx   c                   @   s$   e Zd ZdZefddZdd ZdS )FailurezjClass that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    c                 C   s   || _ || _d S rE   )messageerror_class)selfrz   clsr?   r?   rC   __init__   s    zFailure.__init__c                 C   s   |  | j||d S rE   )r{   rz   )r|   linenofilenamer?   r?   rC   __call__   s    zFailure.__call__N)__name__
__module____qualname____doc__r   r~   r   r?   r?   r?   rC   ry      s   ry   c                   @   sT   e Zd ZdZdZdd edD \ZZZdd Z	dd	 Z
d
d Zdd Zdd ZdS )TokenzToken class.r?   c                 c   s   | ]}t t|V  qd S rE   )propertyr   rH   r?   r?   rC   rJ      s     zToken.<genexpr>   c                 C   s   t | |tt||fS rE   )tuple__new__r   str)r}   r   r`   ra   r?   r?   rC   r      s    zToken.__new__c                 C   s*   | j tkrt| j  S | j dkr$| jS | j S )Nr   )r`   rP   ra   r|   r?   r?   rC   __str__   s
    


zToken.__str__c                 C   s2   | j |krdS d|kr.|dd| j | jgkS dS )zTest a token against a token expression.  This can either be a
        token type or ``'token_type:token_value'``.  This can only test
        against string values and types.
        Tr;   rd   F)r`   re   ra   r|   rf   r?   r?   rC   test   s
    
z
Token.testc                 G   s   |D ]}|  |r dS qdS )z(Test against multiple token expressions.TF)r   )r|   iterablerf   r?   r?   rC   test_any	  s    
zToken.test_anyc                 C   s   d| j | j| jf S )NzToken(%r, %r, %r))r   r`   ra   r   r?   r?   rC   __repr__  s
    zToken.__repr__N)r   r   r   r   	__slots__ranger   r`   ra   r   r   r   r   r   r?   r?   r?   rC   r      s   r   c                   @   s(   e Zd ZdZdd Zdd Zdd ZdS )	TokenStreamIteratorz`The iterator for tokenstreams.  Iterate over the stream
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams.  Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        token = self.stream.current
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        next(self.stream)
        return token


@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\\s.  The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead.  The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, '')
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    __nonzero__ = __bool__  # py2 compatibility

    eos = property(lambda x: not x, doc="Are we at the end of the stream?")

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Go n tokens ahead."""
        for x in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one.

        Use the built-in :func:`next` instead of calling this directly.
        """
        rv = self.current
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()
        return rv

    def close(self):
        """Close the stream."""
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it.  This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        try:
            return self.current
        finally:
            next(self)


def get_lexer(environment):
    """Return a lexer which is probably cached."""
    key = (environment.block_start_string,
           environment.block_end_string,
           environment.variable_start_string,
           environment.variable_end_string,
           environment.comment_start_string,
           environment.comment_end_string,
           environment.line_statement_prefix,
           environment.line_comment_prefix,
           environment.trim_blocks,
           environment.lstrip_blocks,
           environment.newline_sequence,
           environment.keep_trailing_newline)
    lexer = _lexer_cache.get(key)
    if lexer is None:
        lexer = Lexer(environment)
        _lexer_cache[key] = lexer
    return lexer


class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """
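    # A sketch of the layout ``__init__`` below builds (shown for the default
    # ``{% %}`` / ``{{ }}`` / ``{# #}`` delimiters; the regex names here are
    # illustrative, not real attributes): ``self.rules`` maps a lexer state
    # to a list of ``(compiled_regex, tokens, new_state)`` triples, e.g.
    #
    #     self.rules['variable_begin'] == [
    #         (variable_end_re, 'variable_end', '#pop'),
    #         # ... followed by the whitespace/float/integer/name/string/
    #         # operator tag rules shared by all expression states
    #     ]
    #
    # ``'#pop'`` returns to the previous state, ``'#bygroup'`` selects the
    # next state (or token type) from the first named group that matched.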

    def __init__(self, environment):
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape

        # lexing rules for tags
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''

        # strip leading spaces if lstrip_blocks is enabled
        prefix_re = {}
        if environment.lstrip_blocks:
            # use '{%+' to manually disable lstrip_blocks behavior
            no_lstrip_re = e('+')
            # detect overlap between block and variable or comment strings
            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
            # make sure we don't mistake a block for a variable or a comment
            m = block_diff.match(environment.comment_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            m = block_diff.match(environment.variable_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''

            # detect overlap between comment and variable strings
            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
            m = comment_diff.match(environment.variable_start_string)
            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''

            lstrip_re = r'^[ \t]*'
            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
                    lstrip_re,
                    e(environment.block_start_string),
                    no_lstrip_re,
                    e(environment.block_start_string),
                    )
            comment_prefix_re = r'%s%s%s|%s\+?' % (
                    lstrip_re,
                    e(environment.comment_start_string),
                    no_variable_re,
                    e(environment.comment_start_string),
                    )
            prefix_re['block'] = block_prefix_re
            prefix_re['comment'] = comment_prefix_re
        else:
            block_prefix_re = r'%s' % e(environment.block_start_string)

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        block_prefix_re,
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n, r))
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c(r'(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c(r'\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    block_prefix_re,
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream."""
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
                if check_ident and not value.isidentifier():
                    raise TemplateSyntaxError(
                        'Invalid character in identifier',
                        lineno, name, filename)
            elif token == 'string':
                # try to unescape string
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception as e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator.  Use this method if you just want to tokenize a template.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']

        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'

        statetokens = self.rules[stack[-1]]
        source_length = len(source)
        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # strings as token just are yielded as it
                else:
                    data = m.group()
                    # update brace/parenthesis balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError("unexpected '%s'" %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError("unexpected '%s', "
                                                          "expected '%s'" %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)