package jmespath

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
	"unicode/utf8"
)

type token struct {
	tokenType tokType
	value     string
	position  int
	length    int
}

type tokType int

const eof = -1

// Lexer contains information about the expression being tokenized.
type Lexer struct {
	expression string       // The expression provided by the user.
	currentPos int          // The current position in the string.
	lastWidth  int          // The width of the current rune.
	buf        bytes.Buffer // Internal buffer used for building up values.
}

// SyntaxError is the main error used whenever a lexing or parsing error occurs.
type SyntaxError struct {
	msg        string // Error message displayed to user
	Expression string // Expression that generated a SyntaxError
	Offset     int    // The location in the string where the error occurred
}

func (e SyntaxError) Error() string {
	// In the future, it would be good to underline the specific location
	// where the error occurred.
	return "SyntaxError: " + e.msg
}

// HighlightLocation will show where the syntax error occurred.
// It will place a "^" character on a line below the expression
// at the point where the syntax error occurred.
func (e SyntaxError) HighlightLocation() string {
	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
}
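
// As an illustration, lexing the expression "foo[#]" fails on the '#' at
// Offset 4, and HighlightLocation renders:
//
//	foo[#]
//	    ^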

//go:generate stringer -type=tokType
const (
	tUnknown tokType = iota
	tStar
	tDot
	tFilter
	tFlatten
	tLparen
	tRparen
	tLbracket
	tRbracket
	tLbrace
	tRbrace
	tOr
	tPipe
	tNumber
	tUnquotedIdentifier
	tQuotedIdentifier
	tComma
	tColon
	tLT
	tLTE
	tGT
	tGTE
	tEQ
	tNE
	tJSONLiteral
	tStringLiteral
	tCurrent
	tExpref
	tAnd
	tNot
	tEOF
)

var basicTokens = map[rune]tokType{
	'.': tDot,
	'*': tStar,
	',': tComma,
	':': tColon,
	'{': tLbrace,
	'}': tRbrace,
	']': tRbracket, // tLbracket not included because it could be "[]"
	'(': tLparen,
	')': tRparen,
	'@': tCurrent,
}

// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
// When using this bitmask just be sure to shift the rune down 64 bits
// before checking against identifierStartBits.
const identifierStartBits uint64 = 576460745995190270
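
// For example, 'a' (97) maps to bit 97-64 = 33, which is set in
// identifierStartBits, while '@' (64) maps to bit 0, which is not.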

// Bit mask for [0-9a-zA-Z_], 128 bits -> 2 uint64s.
var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
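// Word 0 covers runes 0-63 (the digits '0'-'9'); word 1 is the same mask as
// identifierStartBits, covering the letters and '_'.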

var whiteSpace = map[rune]bool{
	' ': true, '\t': true, '\n': true, '\r': true,
}

func (t token) String() string {
	return fmt.Sprintf("Token{%+v, %s, %d, %d}",
		t.tokenType, t.value, t.position, t.length)
}

// NewLexer creates a new JMESPath lexer.
func NewLexer() *Lexer {
	lexer := Lexer{}
	return &lexer
}

// next returns the next rune in the expression, advancing currentPos.
func (lexer *Lexer) next() rune {
	if lexer.currentPos >= len(lexer.expression) {
		lexer.lastWidth = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
	lexer.lastWidth = w
	lexer.currentPos += w
	return r
}

// back steps the lexer back by the width of the last rune read.
func (lexer *Lexer) back() {
	lexer.currentPos -= lexer.lastWidth
}

// peek returns the next rune without advancing the lexer.
func (lexer *Lexer) peek() rune {
	r := lexer.next()
	lexer.back()
	return r
}

// tokenize takes an expression and returns the corresponding tokens.
func (lexer *Lexer) tokenize(expression string) ([]token, error) {
	var tokens []token
	lexer.expression = expression
	lexer.currentPos = 0
	lexer.lastWidth = 0
loop:
	for {
		r := lexer.next()
		if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
			t := lexer.consumeUnquotedIdentifier()
			tokens = append(tokens, t)
		} else if val, ok := basicTokens[r]; ok {
			// Basic single char token.
			t := token{
				tokenType: val,
				value:     string(r),
				position:  lexer.currentPos - lexer.lastWidth,
				length:    1,
			}
			tokens = append(tokens, t)
		} else if r == '-' || (r >= '0' && r <= '9') {
			t := lexer.consumeNumber()
			tokens = append(tokens, t)
		} else if r == '[' {
			t := lexer.consumeLBracket()
			tokens = append(tokens, t)
		} else if r == '"' {
			t, err := lexer.consumeQuotedIdentifier()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '\'' {
			t, err := lexer.consumeRawStringLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '`' {
			t, err := lexer.consumeLiteral()
			if err != nil {
				return tokens, err
			}
			tokens = append(tokens, t)
		} else if r == '|' {
			t := lexer.matchOrElse(r, '|', tOr, tPipe)
			tokens = append(tokens, t)
		} else if r == '<' {
			t := lexer.matchOrElse(r, '=', tLTE, tLT)
			tokens = append(tokens, t)
		} else if r == '>' {
			t := lexer.matchOrElse(r, '=', tGTE, tGT)
			tokens = append(tokens, t)
		} else if r == '!' {
			t := lexer.matchOrElse(r, '=', tNE, tNot)
			tokens = append(tokens, t)
		} else if r == '=' {
			t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
			tokens = append(tokens, t)
		} else if r == '&' {
			t := lexer.matchOrElse(r, '&', tAnd, tExpref)
			tokens = append(tokens, t)
		} else if r == eof {
			break loop
		} else if _, ok := whiteSpace[r]; ok {
			// Ignore whitespace.
		} else {
			return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
		}
	}
	tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
	return tokens, nil
}
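
// As an illustration, tokenizing "foo[?a<b]" produces the token stream:
//
//	tUnquotedIdentifier("foo"), tFilter("[?"), tUnquotedIdentifier("a"),
//	tLT("<"), tUnquotedIdentifier("b"), tRbracket("]"), tEOF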

// Consume characters until the ending rune "r" is reached.
// If the end of the expression is reached before seeing the
// terminating rune "r", then an error is returned.
// If no error occurs then the matching substring is returned.
// The returned string will not include the ending rune.
func (lexer *Lexer) consumeUntil(end rune) (string, error) {
	start := lexer.currentPos
	current := lexer.next()
	for current != end && current != eof {
		if current == '\\' && lexer.peek() != eof {
			lexer.next()
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// Then we hit an EOF so we never reached the closing delimiter.
		return "", SyntaxError{
			msg:        "Unclosed delimiter: " + string(end),
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
}
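
// For example, with the lexer positioned just after the opening '"' in the
// expression `"foo" | bar`, consumeUntil('"') returns "foo" and leaves the
// lexer positioned after the closing quote.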

func (lexer *Lexer) consumeLiteral() (token, error) {
	start := lexer.currentPos
	value, err := lexer.consumeUntil('`')
	if err != nil {
		return token{}, err
	}
	value = strings.Replace(value, "\\`", "`", -1)
	return token{
		tokenType: tJSONLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}
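
// For example, the literal `[1, 2]` in an expression becomes a tJSONLiteral
// token with value "[1, 2]"; any escaped backticks ("\\`") inside the literal
// are unescaped before the token is built.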

func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
	start := lexer.currentPos
	currentIndex := start
	current := lexer.next()
	for current != '\'' && lexer.peek() != eof {
		if current == '\\' && lexer.peek() == '\'' {
			chunk := lexer.expression[currentIndex : lexer.currentPos-1]
			lexer.buf.WriteString(chunk)
			lexer.buf.WriteString("'")
			lexer.next()
			currentIndex = lexer.currentPos
		}
		current = lexer.next()
	}
	if lexer.lastWidth == 0 {
		// Then we hit an EOF so we never reached the closing delimiter.
		return token{}, SyntaxError{
			msg:        "Unclosed delimiter: '",
			Expression: lexer.expression,
			Offset:     len(lexer.expression),
		}
	}
	if currentIndex < lexer.currentPos {
		lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
	}
	value := lexer.buf.String()
	// Reset the buffer so it can be reused again.
	lexer.buf.Reset()
	return token{
		tokenType: tStringLiteral,
		value:     value,
		position:  start,
		length:    len(value),
	}, nil
}
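
// For example, the raw string literal 'foo\'s' yields a tStringLiteral token
// with value "foo's": the escaped quote is flushed through the internal
// buffer while the surrounding text is copied directly from the expression.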

func (lexer *Lexer) syntaxError(msg string) SyntaxError {
	return SyntaxError{
		msg:        msg,
		Expression: lexer.expression,
		Offset:     lexer.currentPos - 1,
	}
}
// Checks for a two char token, otherwise matches a single character
// token. This is used whenever a two char token overlaps a single
// char token, e.g. "||" -> tOr, "|" -> tPipe.
func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
	start := lexer.currentPos - lexer.lastWidth
	nextRune := lexer.next()
	var t token
	if nextRune == second {
		t = token{
			tokenType: matchedType,
			value:     string(first) + string(second),
			position:  start,
			length:    2,
		}
	} else {
		lexer.back()
		t = token{
			tokenType: singleCharType,
			value:     string(first),
			position:  start,
			length:    1,
		}
	}
	return t
}

func (lexer *Lexer) consumeLBracket() token {
	// There are three options here:
	// 1. A filter expression "[?"
	// 2. A flatten operator "[]"
	// 3. A bare lbracket "["
	start := lexer.currentPos - lexer.lastWidth
	nextRune := lexer.next()
	var t token
	if nextRune == '?' {
		t = token{
			tokenType: tFilter,
			value:     "[?",
			position:  start,
			length:    2,
		}
	} else if nextRune == ']' {
		t = token{
			tokenType: tFlatten,
			value:     "[]",
			position:  start,
			length:    2,
		}
	} else {
		t = token{
			tokenType: tLbracket,
			value:     "[",
			position:  start,
			length:    1,
		}
		lexer.back()
	}
	return t
}

func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
	start := lexer.currentPos
	value, err := lexer.consumeUntil('"')
	if err != nil {
		return token{}, err
	}
	var decoded string
	asJSON := []byte("\"" + value + "\"")
	if err := json.Unmarshal(asJSON, &decoded); err != nil {
		return token{}, err
	}
	return token{
		tokenType: tQuotedIdentifier,
		value:     decoded,
		position:  start - 1,
		length:    len(decoded),
	}, nil
}
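
// For example, the quoted identifier "a b" produces a tQuotedIdentifier token
// with value `a b`, and JSON escape sequences such as "\u2713" are decoded by
// json.Unmarshal before the token is built.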

func (lexer *Lexer) consumeUnquotedIdentifier() token {
	// Consume runes until we reach the end of an unquoted identifier.
	start := lexer.currentPos - lexer.lastWidth
	for {
		r := lexer.next()
		if r < 0 || r > 127 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
			lexer.back()
			break
		}
	}
	value := lexer.expression[start:lexer.currentPos]
	return token{
		tokenType: tUnquotedIdentifier,
		value:     value,
		position:  start,
		length:    lexer.currentPos - start,
	}
}
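
// For example, in the expression "foo.bar" the lexer consumes "foo" as a
// tUnquotedIdentifier and stops at the '.', which is left for the next
// iteration of tokenize.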

func (lexer *Lexer) consumeNumber() token {
	// Consume runes until we reach something that's not a number.
	start := lexer.currentPos - lexer.lastWidth
	for {
		r := lexer.next()
		if r < '0' || r > '9' {
			lexer.back()
			break
		}
	}
	value := lexer.expression[start:lexer.currentPos]
	return token{
		tokenType: tNumber,
		value:     value,
		position:  start,
		length:    lexer.currentPos - start,
	}
}
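
// For example, "-12" is consumed as a single tNumber token with value "-12":
// tokenize dispatches here on the leading '-', and the loop then consumes the
// trailing digits.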