Source file: js_lexer.go
Package: github.com/evanw/esbuild/internal/js_lexer
package js_lexer
// The token types produced by the lexer. The zero value is TEndOfFile, so
// a failed lookup in a map[string]T (e.g. Keywords) yields a value callers
// can recognize as "not a real token".
const (
	TEndOfFile T = iota
	TSyntaxError

	// Literals
	TNoSubstitutionTemplateLiteral // Contents are in lexer.StringLiteral ([]uint16)
	TNumericLiteral                // Contents are in lexer.Number (float64)
	TStringLiteral                 // Contents are in lexer.StringLiteral ([]uint16)
	TBigIntegerLiteral             // Contents are in lexer.Identifier (string)

	// Pseudo-literals: the pieces of a template literal around "${...}" holes
	TTemplateHead   // Contents are in lexer.StringLiteral ([]uint16)
	TTemplateMiddle // Contents are in lexer.StringLiteral ([]uint16)
	TTemplateTail   // Contents are in lexer.StringLiteral ([]uint16)

	// Punctuation
	TAmpersand
	TAmpersandAmpersand
	TAsterisk
	TAsteriskAsterisk
	TAt
	TBar
	TBarBar
	TCaret
	TCloseBrace
	TCloseBracket
	TCloseParen
	TColon
	TComma
	TDot
	TDotDotDot
	TEqualsEquals
	TEqualsEqualsEquals
	TEqualsGreaterThan
	TExclamation
	TExclamationEquals
	TExclamationEqualsEquals
	TGreaterThan
	TGreaterThanEquals
	TGreaterThanGreaterThan
	TGreaterThanGreaterThanGreaterThan
	TLessThan
	TLessThanEquals
	TLessThanLessThan
	TMinus
	TMinusMinus
	TOpenBrace
	TOpenBracket
	TOpenParen
	TPercent
	TPlus
	TPlusPlus
	TQuestion
	TQuestionDot
	TQuestionQuestion
	TSemicolon
	TSlash
	TTilde

	// Assignment operators. NOTE: this contiguous run is what IsAssign()
	// tests with a range comparison — keep it contiguous when adding tokens.
	TAmpersandAmpersandEquals
	TAmpersandEquals
	TAsteriskAsteriskEquals
	TAsteriskEquals
	TBarBarEquals
	TBarEquals
	TCaretEquals
	TEquals
	TGreaterThanGreaterThanEquals
	TGreaterThanGreaterThanGreaterThanEquals
	TLessThanLessThanEquals
	TMinusEquals
	TPercentEquals
	TPlusEquals
	TQuestionQuestionEquals
	TSlashEquals

	// Identifiers
	TIdentifier     // Contents are in lexer.Identifier (string)
	TEscapedKeyword // A keyword that has been escaped as an identifier

	// Reserved words
	TBreak
	TCase
	TCatch
	TClass
	TConst
	TContinue
	TDebugger
	TDefault
	TDelete
	TDo
	TElse
	TEnum
	TExport
	TExtends
	TFalse
	TFinally
	TFor
	TFunction
	TIf
	TImport
	TIn
	TInstanceof
	TNew
	TNull
	TReturn
	TSuper
	TSwitch
	TThis
	TThrow
	TTrue
	TTry
	TTypeof
	TVar
	TVoid
	TWhile
	TWith
)
func ( T) () bool {
return >= TAmpersandAmpersandEquals && <= TSlashEquals
}
"break": TBreak,
"case": TCase,
"catch": TCatch,
"class": TClass,
"const": TConst,
"continue": TContinue,
"debugger": TDebugger,
"default": TDefault,
"delete": TDelete,
"do": TDo,
"else": TElse,
"enum": TEnum,
"export": TExport,
"extends": TExtends,
"false": TFalse,
"finally": TFinally,
"for": TFor,
"function": TFunction,
"if": TIf,
"import": TImport,
"in": TIn,
"instanceof": TInstanceof,
"new": TNew,
"null": TNull,
"return": TReturn,
"super": TSuper,
"switch": TSwitch,
"this": TThis,
"throw": TThrow,
"true": TTrue,
"try": TTry,
"typeof": TTypeof,
"var": TVar,
"void": TVoid,
"while": TWhile,
"with": TWith,
}
// StrictModeReservedWords is the set of identifiers that are additionally
// reserved when the code is in strict mode (they are ordinary identifiers
// in sloppy mode, so they are not in the Keywords map).
var StrictModeReservedWords = map[string]bool{
	"implements": true,
	"interface":  true,
	"let":        true,
	"package":    true,
	"private":    true,
	"protected":  true,
	"public":     true,
	"static":     true,
	"yield":      true,
}
// json holds the lexer's JSON-mode flags. When parse is true the lexer
// rejects JavaScript-only syntax (single quotes, comments unless
// allowComments, "\v" and hex/legacy-octal escapes, etc.).
type json struct {
	parse         bool
	allowComments bool
}
// Lexer scans JavaScript/JSX/JSON source text one token at a time.
// Position bookkeeping: "start".."end" delimit the current token's bytes in
// source.Contents, and "current" is the byte offset just past codePoint
// (the code point most recently read by step()).
type Lexer struct {
	log    logger.Log
	source logger.Source

	// Byte offsets into source.Contents (see struct comment above).
	current int
	start   int
	end     int

	ApproximateNewlineCount   int
	LegacyOctalLoc            logger.Loc
	Token                     T
	HasNewlineBefore          bool
	HasPureCommentBefore      bool
	PreserveAllCommentsBefore bool
	IsLegacyOctalLiteral      bool
	CommentsToPreserveBefore  []js_ast.Comment
	AllOriginalComments       []js_ast.Comment

	// codePoint is the code point at "end"; -1 signals end of file.
	codePoint rune

	// Per-token payloads; which one is valid depends on Token (see the
	// comments on the T constants above).
	StringLiteral []uint16
	Identifier    string
	Number        float64

	JSXFactoryPragmaComment  js_ast.Span
	JSXFragmentPragmaComment js_ast.Span
	SourceMappingURL         js_ast.Span

	rescanCloseBraceAsTemplateToken bool
	forGlobalName                   bool
	json                            json

	// prevErrorLoc suppresses duplicate diagnostics at the same location.
	prevErrorLoc  logger.Loc
	IsLogDisabled bool
}
type LexerPanic struct{}
func ( logger.Log, logger.Source) Lexer {
:= Lexer{
log: ,
source: ,
prevErrorLoc: logger.Loc{Start: -1},
}
.step()
.Next()
return
}
func ( logger.Log, logger.Source) Lexer {
:= Lexer{
log: ,
source: ,
prevErrorLoc: logger.Loc{Start: -1},
forGlobalName: true,
}
.step()
.Next()
return
}
func ( logger.Log, logger.Source, bool) Lexer {
:= Lexer{
log: ,
source: ,
prevErrorLoc: logger.Loc{Start: -1},
json: json{
parse: true,
allowComments: ,
},
}
.step()
.Next()
return
}
func ( *Lexer) () logger.Loc {
return logger.Loc{Start: int32(.start)}
}
func ( *Lexer) () logger.Range {
return logger.Range{Loc: logger.Loc{Start: int32(.start)}, Len: int32(.end - .start)}
}
func ( *Lexer) () string {
return .source.Contents[.start:.end]
}
func ( *Lexer) () string {
var string
switch .Token {
if < len() && [] == '\n' {
++
}
= '\n'
}
[] =
++
}
return string([:])
}
func ( *Lexer) () bool {
return .Token >= TIdentifier
}
func ( *Lexer) ( string) bool {
return .Token == TIdentifier && .Raw() ==
}
func ( *Lexer) ( string) {
if !.IsContextualKeyword() {
.ExpectedString(fmt.Sprintf("%q", ))
}
.Next()
}
func ( *Lexer) () {
:= logger.Loc{Start: int32(.end)}
:= "Unexpected end of file"
if .end < len(.source.Contents) {
, := utf8.DecodeRuneInString(.source.Contents[.end:])
if < 0x20 {
= fmt.Sprintf("Syntax error \"\\x%02X\"", )
} else if >= 0x80 {
= fmt.Sprintf("Syntax error \"\\u{%x}\"", )
} else if != '"' {
= fmt.Sprintf("Syntax error \"%c\"", )
} else {
= "Syntax error '\"'"
}
}
.addError(, )
panic(LexerPanic{})
}
func ( *Lexer) ( string) {
:= fmt.Sprintf("%q", .Raw())
if .start == len(.source.Contents) {
= "end of file"
}
.addRangeError(.Range(), fmt.Sprintf("Expected %s but found %s", , ))
panic(LexerPanic{})
}
func ( *Lexer) ( T) {
if , := tokenToString[]; {
.ExpectedString()
} else {
.Unexpected()
}
}
func ( *Lexer) () {
:= fmt.Sprintf("%q", .Raw())
if .start == len(.source.Contents) {
= "end of file"
}
.addRangeError(.Range(), fmt.Sprintf("Unexpected %s", ))
panic(LexerPanic{})
}
func ( *Lexer) ( T) {
if .Token != {
.Expected()
}
.Next()
}
func ( *Lexer) () {
if .Token == TSemicolon || (!.HasNewlineBefore &&
.Token != TCloseBrace && .Token != TEndOfFile) {
.Expect(TSemicolon)
}
}
func ( *Lexer) ( bool) {
switch .Token {
case TLessThan:
if {
.NextInsideJSXElement()
} else {
.Next()
}
case TLessThanEquals:
.Token = TEquals
.start++
case TLessThanLessThan:
.Token = TLessThan
.start++
case TLessThanLessThanEquals:
.Token = TLessThanEquals
.start++
default:
.Expected(TLessThan)
}
}
func ( *Lexer) ( bool) {
switch .Token {
case TGreaterThan:
if {
.NextInsideJSXElement()
} else {
.Next()
}
case TGreaterThanEquals:
.Token = TEquals
.start++
case TGreaterThanGreaterThan:
.Token = TGreaterThan
.start++
case TGreaterThanGreaterThanEquals:
.Token = TGreaterThanEquals
.start++
case TGreaterThanGreaterThanGreaterThan:
.Token = TGreaterThanGreaterThan
.start++
case TGreaterThanGreaterThanGreaterThanEquals:
.Token = TGreaterThanGreaterThanEquals
.start++
default:
.Expected(TGreaterThan)
}
}
func ( string) bool {
if len() == 0 {
return false
}
for , := range {
if == 0 {
if !IsIdentifierStart() {
return false
}
} else {
if !IsIdentifierContinue() {
return false
}
}
}
return true
}
func ( string) string {
if IsIdentifier() {
return
}
:= strings.Builder{}
, := utf8.DecodeRuneInString()
= [:]
if IsIdentifierStart() {
.WriteRune()
} else {
.WriteRune('_')
}
for != "" {
, := utf8.DecodeRuneInString()
= [:]
if IsIdentifierContinue() {
.WriteRune()
} else {
.WriteRune('_')
}
}
return .String()
}
func ( []uint16) bool {
:= len()
if == 0 {
return false
}
for := 0; < ; ++ {
:= rune([])
if >= 0xD800 && <= 0xDBFF && +1 < {
if := rune([+1]); >= 0xDC00 && <= 0xDFFF {
= ( << 10) + + (0x10000 - (0xD800 << 10) - 0xDC00)
++
}
}
if == 0 {
if !IsIdentifierStart() {
return false
}
} else {
if !IsIdentifierContinue() {
return false
}
}
}
return true
}
func ( rune) bool {
switch {
case '_', '$',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
return true
}
if < 0x7F {
return false
}
return unicode.Is(idStart, )
}
func ( rune) bool {
switch {
case '_', '$', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
return true
}
if < 0x7F {
return false
}
if == 0x200C || == 0x200D {
return true
}
return unicode.Is(idContinue, )
}
// IsWhitespace reports whether codePoint is JavaScript whitespace per the
// ECMAScript "WhiteSpace" production. Line terminators ('\r', '\n',
// U+2028, U+2029) are deliberately NOT included — the lexer handles them
// separately for semicolon insertion. (Header and ASCII cases were lost in
// the extracted source and are reconstructed here.)
func IsWhitespace(codePoint rune) bool {
	switch codePoint {
	case
		'\u0009', // character tabulation
		'\u000B', // line tabulation
		'\u000C', // form feed
		'\u0020', // space
		'\u00A0', // no-break space

		// Unicode "Space_Separator" code points
		'\u1680', // ogham space mark
		'\u2000', // en quad
		'\u2001', // em quad
		'\u2002', // en space
		'\u2003', // em space
		'\u2004', // three-per-em space
		'\u2005', // four-per-em space
		'\u2006', // six-per-em space
		'\u2007', // figure space
		'\u2008', // punctuation space
		'\u2009', // thin space
		'\u200A', // hair space
		'\u202F', // narrow no-break space
		'\u205F', // medium mathematical space
		'\u3000', // ideographic space

		'\uFEFF': // zero width non-breaking space
		return true

	default:
		return false
	}
}
func ( logger.Source, logger.Loc) logger.Range {
:= .Contents[.Start:]
if len() == 0 {
return logger.Range{Loc: , Len: 0}
}
:= 0
, := utf8.DecodeRuneInString([:])
if == '#' {
++
, _ = utf8.DecodeRuneInString([:])
}
for < len() {
, := utf8.DecodeRuneInString([:])
if == '\\' {
+=
return .RangeOfString()
}
func ( *Lexer) ( T) {
if .Token != {
.Expected()
}
.NextJSXElementChild()
}
func ( *Lexer) () {
.HasNewlineBefore = false
:= .end
for {
.start = .end
.Token = 0
switch .codePoint {
case -1: // This indicates the end of the file
.Token = TEndOfFile
case '{':
.step()
.Token = TOpenBrace
case '<':
.step()
.Token = TLessThan
default:
:= false
:
for {
switch .codePoint {
.SyntaxError()
break
if len(.StringLiteral) == 0 {
.HasNewlineBefore = true
continue
}
:= len()
:= make([]uint16, )
for := 0; < ; ++ {
[] = uint16([])
}
.StringLiteral =
}
}
break
}
}
func ( *Lexer) ( T) {
if .Token != {
.Expected()
}
.NextInsideJSXElement()
}
func ( *Lexer) () {
.HasNewlineBefore = false
for {
.start = .end
.Token = 0
switch .codePoint {
case -1: // This indicates the end of the file
.Token = TEndOfFile
case '\r', '\n', '\u2028', '\u2029':
.step()
.HasNewlineBefore = true
continue
case '\t', ' ':
.step()
continue
case '.':
.step()
.Token = TDot
case '=':
.step()
.Token = TEquals
case '{':
.step()
.Token = TOpenBrace
case '}':
.step()
.Token = TCloseBrace
case '<':
.step()
.Token = TLessThan
case '>':
.step()
.Token = TGreaterThan
.step()
switch .codePoint {
case '/':
:
for {
.step()
switch .codePoint {
case '\r', '\n', '\u2028', '\u2029':
break
case -1: // This indicates the end of the file
break
}
}
continue
case '*':
.step()
:= .Range()
:
for {
switch .codePoint {
case '*':
.step()
if .codePoint == '/' {
.step()
break
}
case '\r', '\n', '\u2028', '\u2029':
.step()
.HasNewlineBefore = true
case -1: // This indicates the end of the file
.start = .end
.addErrorWithNotes(.Loc(), "Expected \"*/\" to terminate multi-line comment",
[]logger.MsgData{logger.RangeData(&.source, , "The multi-line comment starts here")})
panic(LexerPanic{})
default:
.step()
}
}
continue
default:
.Token = TSlash
}
case '\'', '"':
:= .codePoint
:= false
.step()
:
for {
switch .codePoint {
case -1: // This indicates the end of the file
.SyntaxError()
case '&':
= true
.step()
case :
.step()
break
.StringLiteral = decodeJSXEntities([]uint16{}, )
:= len()
:= make([]uint16, )
for := 0; < ; ++ {
[] = uint16([])
}
.StringLiteral =
}
if IsWhitespace(.codePoint) {
.step()
continue
}
if IsIdentifierStart(.codePoint) {
.step()
for IsIdentifierContinue(.codePoint) || .codePoint == '-' {
.step()
}
if .codePoint == ':' {
.step()
if IsIdentifierStart(.codePoint) {
.step()
for IsIdentifierContinue(.codePoint) || .codePoint == '-' {
.step()
}
} else {
.addError(logger.Loc{Start: .Range().End()},
fmt.Sprintf("Expected identifier after %q in namespaced JSX name", .Raw()))
}
}
.Identifier = .Raw()
.Token = TIdentifier
break
}
.end = .current
.Token = TSyntaxError
}
return
}
}
func ( *Lexer) () {
.HasNewlineBefore = .end == 0
.HasPureCommentBefore = false
.CommentsToPreserveBefore = nil
for {
.start = .end
.Token = 0
switch .codePoint {
case -1: // This indicates the end of the file
.Token = TEndOfFile
case '#':
.step()
if .codePoint == '\\' {
.Identifier, _ = .scanIdentifierWithEscapes(privateIdentifier)
} else {
if !IsIdentifierStart(.codePoint) {
.SyntaxError()
}
.step()
for IsIdentifierContinue(.codePoint) {
.step()
}
if .codePoint == '\\' {
.Identifier, _ = .scanIdentifierWithEscapes(privateIdentifier)
} else {
.Identifier = .Raw()
}
}
.Token = TPrivateIdentifier
}
case '\r', '\n', '\u2028', '\u2029':
.step()
.HasNewlineBefore = true
continue
case '\t', ' ':
.step()
continue
case '(':
.step()
.Token = TOpenParen
case ')':
.step()
.Token = TCloseParen
case '[':
.step()
.Token = TOpenBracket
case ']':
.step()
.Token = TCloseBracket
case '{':
.step()
.Token = TOpenBrace
case '}':
.step()
.Token = TCloseBrace
case ',':
.step()
.Token = TComma
case ':':
.step()
.Token = TColon
case ';':
.step()
.Token = TSemicolon
case '@':
.step()
.Token = TAt
case '~':
.step()
.Token = TTilde
.step()
switch .codePoint {
case '=':
.step()
.Token = TAmpersandEquals
case '&':
.step()
switch .codePoint {
case '=':
.step()
.Token = TAmpersandAmpersandEquals
default:
.Token = TAmpersandAmpersand
}
default:
.Token = TAmpersand
}
if .codePoint == '>' && .HasNewlineBefore {
.step()
.log.AddRangeWarning(&.source, .Range(),
"Treating \"-->\" as the start of a legacy HTML single-line comment")
:
for {
switch .codePoint {
case '\r', '\n', '\u2028', '\u2029':
break
case -1: // This indicates the end of the file
break
}
.step()
}
continue
}
.Token = TMinusMinus
default:
.Token = TMinus
}
.step()
switch .codePoint {
case '=':
.step()
.Token = TAsteriskEquals
case '*':
.step()
switch .codePoint {
case '=':
.step()
.Token = TAsteriskAsteriskEquals
default:
.Token = TAsteriskAsterisk
}
default:
.Token = TAsterisk
}
.step()
if .forGlobalName {
.Token = TSlash
break
}
switch .codePoint {
case '=':
.step()
.Token = TSlashEquals
break
case '/':
:
for {
.step()
switch .codePoint {
case '\r', '\n', '\u2028', '\u2029':
break
case -1: // This indicates the end of the file
break
}
}
if .json.parse && !.json.allowComments {
.addRangeError(.Range(), "JSON does not support comments")
}
.scanCommentText()
continue
case '*':
.step()
:= .Range()
:
for {
switch .codePoint {
case '*':
.step()
if .codePoint == '/' {
.step()
break
}
case '\r', '\n', '\u2028', '\u2029':
.step()
.HasNewlineBefore = true
case -1: // This indicates the end of the file
.start = .end
.addErrorWithNotes(.Loc(), "Expected \"*/\" to terminate multi-line comment",
[]logger.MsgData{logger.RangeData(&.source, , "The multi-line comment starts here")})
panic(LexerPanic{})
default:
.step()
}
}
if .json.parse && !.json.allowComments {
.addRangeError(.Range(), "JSON does not support comments")
}
.scanCommentText()
continue
default:
.Token = TSlash
}
.step()
switch .codePoint {
case '>':
.step()
.Token = TEqualsGreaterThan
case '=':
.step()
switch .codePoint {
case '=':
.step()
.Token = TEqualsEqualsEquals
default:
.Token = TEqualsEquals
}
default:
.Token = TEquals
}
.step()
switch .codePoint {
case '=':
.step()
.Token = TLessThanEquals
case '<':
.step()
switch .codePoint {
case '=':
.step()
.Token = TLessThanLessThanEquals
default:
.Token = TLessThanLessThan
}
case '!':
if strings.HasPrefix(.source.Contents[.start:], "<!--") {
.step()
.step()
.step()
.log.AddRangeWarning(&.source, .Range(),
"Treating \"<!--\" as the start of a legacy HTML single-line comment")
:
for {
switch .codePoint {
case '\r', '\n', '\u2028', '\u2029':
break
case -1: // This indicates the end of the file
break
}
.step()
}
continue
}
.Token = TLessThan
default:
.Token = TLessThan
}
.step()
switch .codePoint {
case '=':
.step()
.Token = TGreaterThanEquals
case '>':
.step()
switch .codePoint {
case '=':
.step()
.Token = TGreaterThanGreaterThanEquals
case '>':
.step()
switch .codePoint {
case '=':
.step()
.Token = TGreaterThanGreaterThanGreaterThanEquals
default:
.Token = TGreaterThanGreaterThanGreaterThan
}
default:
.Token = TGreaterThanGreaterThan
}
default:
.Token = TGreaterThan
}
.step()
switch .codePoint {
case '=':
.step()
switch .codePoint {
case '=':
.step()
.Token = TExclamationEqualsEquals
default:
.Token = TExclamationEquals
}
default:
.Token = TExclamation
}
case '\'', '"', '`':
:= .codePoint
:= false
:= 1
if != '`' {
.Token = TStringLiteral
} else if .rescanCloseBraceAsTemplateToken {
.Token = TTemplateTail
} else {
.Token = TNoSubstitutionTemplateLiteral
}
.step()
:
for {
switch .codePoint {
case '\\':
= true
.step()
if .codePoint == '\r' && !.json.parse {
.step()
if .codePoint == '\n' {
.step()
}
continue
}
case -1: // This indicates the end of the file
.addError(logger.Loc{Start: int32(.end)}, "Unterminated string literal")
panic(LexerPanic{})
case '\r':
if != '`' {
.addError(logger.Loc{Start: int32(.end)}, "Unterminated string literal")
panic(LexerPanic{})
}
= true
case '\n':
if != '`' {
.addError(logger.Loc{Start: int32(.end)}, "Unterminated string literal")
panic(LexerPanic{})
}
case '$':
if == '`' {
.step()
if .codePoint == '{' {
= 2
.step()
if .rescanCloseBraceAsTemplateToken {
.Token = TTemplateMiddle
} else {
.Token = TTemplateHead
}
break
}
continue
}
case :
.step()
break
.StringLiteral = .decodeEscapeSequences(.start+1, )
:= len()
:= make([]uint16, )
for := 0; < ; ++ {
[] = uint16([])
}
.StringLiteral =
}
if == '\'' && .json.parse {
.addRangeError(.Range(), "JSON strings must use double quotes")
}
case '_', '$',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
.step()
for IsIdentifierContinue(.codePoint) {
.step()
}
if .codePoint == '\\' {
.Identifier, .Token = .scanIdentifierWithEscapes(normalIdentifier)
} else {
:= .Raw()
.Identifier =
.Token = Keywords[]
if .Token == 0 {
.Token = TIdentifier
}
}
case '\\':
.Identifier, .Token = .scanIdentifierWithEscapes(normalIdentifier)
case '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
.parseNumericLiteralOrDot()
if IsWhitespace(.codePoint) {
.step()
continue
}
if IsIdentifierStart(.codePoint) {
.step()
for IsIdentifierContinue(.codePoint) {
.step()
}
if .codePoint == '\\' {
.Identifier, .Token = .scanIdentifierWithEscapes(normalIdentifier)
} else {
.Token = TIdentifier
.Identifier = .Raw()
}
break
}
.end = .current
.Token = TSyntaxError
}
return
}
}
// identifierKind distinguishes how an identifier with escape sequences is
// being scanned: a normal identifier versus a "#private" field name.
type identifierKind uint8

const (
	normalIdentifier identifierKind = iota
	privateIdentifier
)
if .codePoint == '\\' {
.step()
if .codePoint != 'u' {
.SyntaxError()
}
.step()
for := 0; < 4; ++ {
switch .codePoint {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'a', 'b', 'c', 'd', 'e', 'f',
'A', 'B', 'C', 'D', 'E', 'F':
.step()
default:
.SyntaxError()
}
}
}
continue
}
if !IsIdentifierContinue(.codePoint) {
break
}
.step()
}
if Keywords[] != 0 {
return , TEscapedKeyword
} else {
return , TIdentifier
}
}
.Token = TDot
return
}
:= 0
:= 0
:= == '.'
:= 0.0
.IsLegacyOctalLiteral = false
if == '0' {
switch .codePoint {
case 'b', 'B':
= 2
case 'o', 'O':
= 8
case 'x', 'X':
= 16
case '0', '1', '2', '3', '4', '5', '6', '7', '_':
= 8
.IsLegacyOctalLiteral = true
}
}
if > 0 && .end == +1 {
.SyntaxError()
}
if || .IsLegacyOctalLiteral {
.SyntaxError()
}
= .end
++
case '0', '1':
.Number = .Number* + float64(.codePoint-'0')
case '2', '3', '4', '5', '6', '7':
if == 2 {
.SyntaxError()
}
.Number = .Number* + float64(.codePoint-'0')
case '8', '9':
if .IsLegacyOctalLiteral {
= true
} else if < 10 {
.SyntaxError()
}
.Number = .Number* + float64(.codePoint-'0')
case 'A', 'B', 'C', 'D', 'E', 'F':
if != 16 {
.SyntaxError()
}
.Number = .Number* + float64(.codePoint+10-'A')
case 'a', 'b', 'c', 'd', 'e', 'f':
if != 16 {
.SyntaxError()
}
.Number = .Number* + float64(.codePoint+10-'a')
if {
.SyntaxError()
}
break
}
.step()
= false
}
:= .codePoint == 'n' && !
if || {
:= .Raw()
if && .IsLegacyOctalLiteral {
.SyntaxError()
}
if {
.Identifier =
, := strconv.ParseFloat(, 64)
.Number =
}
}
if > 0 && .end == +1 {
.SyntaxError()
}
if {
.SyntaxError()
}
= .end
++
}
.step()
}
if > 0 && .end == +1 {
.end--
.SyntaxError()
}
= true
.step()
if .codePoint == '_' {
.SyntaxError()
}
for {
if .codePoint < '0' || .codePoint > '9' {
if .codePoint != '_' {
break
}
if > 0 && .end == +1 {
.SyntaxError()
}
= .end
++
}
.step()
}
}
if > 0 && .end == +1 {
.SyntaxError()
}
= .end
++
}
.step()
}
}
:= .Raw()
if len() > 1 && == '0' {
.SyntaxError()
}
.Identifier =
, := strconv.ParseFloat(, 64)
.Number =
}
}
if > 0 && .end == +1 {
.end--
.SyntaxError()
}
if .codePoint == 'n' && ! {
.Token = TBigIntegerLiteral
.step()
}
if IsIdentifierStart(.codePoint) {
.SyntaxError()
}
}
func ( *Lexer) () {
:= func() {
if .codePoint == '\\' {
.step()
}
switch .codePoint {
.SyntaxError()
case -1: // This indicates the end of the file
.SyntaxError()
default:
.step()
}
}
for {
switch .codePoint {
case '/':
.step()
for IsIdentifierContinue(.codePoint) {
switch .codePoint {
case 'g', 'i', 'm', 's', 'u', 'y':
.step()
default:
.SyntaxError()
}
}
return
case '[':
.step()
for .codePoint != ']' {
()
}
.step()
default:
()
}
}
}
func ( []uint16, string) []uint16 {
:= 0
for < len() {
, := utf8.DecodeRuneInString([:])
+=
if == '&' {
:= strings.IndexByte([:], ';')
if > 0 {
:= [ : +]
if [0] == '#' {
:= [1:]
:= 10
if len() > 1 && [0] == 'x' {
= [1:]
= 16
}
if , := strconv.ParseInt(, , 32); == nil {
= rune()
+= + 1
}
} else if , := jsxEntity[]; {
=
+= + 1
}
}
}
if <= 0xFFFF {
= append(, uint16())
} else {
-= 0x10000
= append(, uint16(0xD800+((>>10)&0x3FF)), uint16(0xDC00+(&0x3FF)))
}
}
return
}
func ( string) []uint16 {
:= -1
:= []uint16{}
:= 0
:= 0
for < len() {
, := utf8.DecodeRuneInString([:])
switch {
= decodeJSXEntities(, [:])
}
= -1
if !IsWhitespace() {
= +
if == -1 {
=
}
}
}
+=
}
if != -1 {
if len() > 0 {
= append(, ' ')
}
= decodeJSXEntities(, [:])
}
return
}
func ( *Lexer) ( int, string) []uint16 {
:= []uint16{}
:= 0
for < len() {
, := utf8.DecodeRuneInString([:])
+=
switch {
if < len() && [] == '\n' {
++
}
= append(, '\n')
continue
case '\\':
, := utf8.DecodeRuneInString([:])
+=
switch {
case 'b':
= append(, '\b')
continue
case 'f':
= append(, '\f')
continue
case 'n':
= append(, '\n')
continue
case 'r':
= append(, '\r')
continue
case 't':
= append(, '\t')
continue
case 'v':
if .json.parse {
.end = + -
.SyntaxError()
}
= append(, '\v')
continue
case '0', '1', '2', '3', '4', '5', '6', '7':
:= - 2
if .json.parse {
.end = + -
.SyntaxError()
}
:= false
:= - '0'
, := utf8.DecodeRuneInString([:])
switch {
case '0', '1', '2', '3', '4', '5', '6', '7':
= *8 + - '0'
+=
, := utf8.DecodeRuneInString([:])
switch {
case '0', '1', '2', '3', '4', '5', '6', '7':
:= *8 + - '0'
if < 256 {
=
+=
}
case '8', '9':
= true
}
case '8', '9':
= true
}
=
if || [:] != "\\0" {
.LegacyOctalLoc = logger.Loc{Start: int32( + )}
}
case '8', '9':
=
.LegacyOctalLoc = logger.Loc{Start: int32( + - 2)}
case 'x':
if .json.parse {
.end = + -
.SyntaxError()
}
:= '\000'
for := 0; < 2; ++ {
, := utf8.DecodeRuneInString([:])
+=
switch {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
= *16 | ( - '0')
case 'a', 'b', 'c', 'd', 'e', 'f':
= *16 | ( + 10 - 'a')
case 'A', 'B', 'C', 'D', 'E', 'F':
= *16 | ( + 10 - 'A')
default:
.end = + -
.SyntaxError()
}
}
=
:= '\000'
, := utf8.DecodeRuneInString([:])
+=
if == '{' {
if .json.parse {
.end = + -
.SyntaxError()
}
:= - - -
:= true
:= false
:
for {
, = utf8.DecodeRuneInString([:])
+=
switch {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
= *16 | ( - '0')
case 'a', 'b', 'c', 'd', 'e', 'f':
= *16 | ( + 10 - 'a')
case 'A', 'B', 'C', 'D', 'E', 'F':
= *16 | ( + 10 - 'A')
case '}':
if {
.end = + -
.SyntaxError()
}
break
default:
.end = + -
.SyntaxError()
}
if > utf8.MaxRune {
= true
}
= false
}
if {
.addRangeError(logger.Range{Loc: logger.Loc{Start: int32( + )}, Len: int32( - )},
"Unicode escape sequence is out of range")
panic(LexerPanic{})
}
for := 0; < 4; ++ {
switch {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
= *16 | ( - '0')
case 'a', 'b', 'c', 'd', 'e', 'f':
= *16 | ( + 10 - 'a')
case 'A', 'B', 'C', 'D', 'E', 'F':
= *16 | ( + 10 - 'A')
default:
.end = + -
.SyntaxError()
}
if < 3 {
, = utf8.DecodeRuneInString([:])
+=
}
}
}
=
case '\r':
if .json.parse {
.end = + -
.SyntaxError()
}
++
}
continue
case '\n', '\u2028', '\u2029':
if .json.parse {
.end = + -
.SyntaxError()
}
continue
default:
if .json.parse {
switch {
case '"', '\\', '/':
default:
.end = + -
.SyntaxError()
}
}
=
}
}
if <= 0xFFFF {
= append(, uint16())
} else {
-= 0x10000
= append(, uint16(0xD800+((>>10)&0x3FF)), uint16(0xDC00+(&0x3FF)))
}
}
return
}
func ( *Lexer) () {
if .Token != TCloseBrace {
.Expected(TCloseBrace)
}
.rescanCloseBraceAsTemplateToken = true
.codePoint = '`'
.current = .end
.end -= 1
.Next()
.rescanCloseBraceAsTemplateToken = false
}
func ( *Lexer) () {
, := utf8.DecodeRuneInString(.source.Contents[.current:])
if == 0 {
= -1
}
if == '\n' {
.ApproximateNewlineCount++
}
.codePoint =
.end = .current
.current +=
}
if == .prevErrorLoc {
return
}
.prevErrorLoc =
if !.IsLogDisabled {
.log.AddError(&.source, , )
}
}
if == .prevErrorLoc {
return
}
.prevErrorLoc =
if !.IsLogDisabled {
.log.AddErrorWithNotes(&.source, , , )
}
}
if .Loc == .prevErrorLoc {
return
}
.prevErrorLoc = .Loc
if !.IsLogDisabled {
.log.AddRangeError(&.source, , )
}
}
func ( string, string) bool {
:= len()
:= len()
if >= && [0:] == {
if == {
return true
}
, := utf8.DecodeRuneInString([:])
if !IsIdentifierContinue() {
return true
}
}
return false
}
// pragmaArg controls how scanForPragmaArg treats whitespace between the
// pragma name and its argument: required-and-skipped, or disallowed.
type pragmaArg uint8

const (
	pragmaNoSpaceFirst pragmaArg = iota
	pragmaSkipSpaceFirst
)
func ( pragmaArg, int, string, string) (js_ast.Span, bool) {
= [len():]
+= len()
if == "" {
return js_ast.Span{}, false
}
, := utf8.DecodeRuneInString()
if == pragmaSkipSpaceFirst {
if !IsWhitespace() {
return js_ast.Span{}, false
}
for IsWhitespace() {
= [:]
+=
if == "" {
return js_ast.Span{}, false
}
, = utf8.DecodeRuneInString()
}
}
:= 0
for !IsWhitespace() {
+=
if >= len() {
break
}
, = utf8.DecodeRuneInString([:])
if IsWhitespace() {
break
}
}
return js_ast.Span{
Text: [:],
Range: logger.Range{
Loc: logger.Loc{Start: int32()},
Len: int32(),
},
}, true
}
func ( *Lexer) () {
:= .source.Contents[.start:.end]
:= len() > 2 && [2] == '!'
:= [1] == '*'
:= len()
if {
-= 2
}
for , := 0, len(); < ; ++ {
switch [] {
case '#':
:= [+1 : ]
if hasPrefixWithWordBoundary(, "__PURE__") {
.HasPureCommentBefore = true
} else if strings.HasPrefix(, " sourceMappingURL=") {
if , := scanForPragmaArg(pragmaNoSpaceFirst, .start++1, " sourceMappingURL=", ); {
.SourceMappingURL =
}
}
case '@':
:= [+1 : ]
if hasPrefixWithWordBoundary(, "__PURE__") {
.HasPureCommentBefore = true
} else if hasPrefixWithWordBoundary(, "preserve") || hasPrefixWithWordBoundary(, "license") {
= true
} else if hasPrefixWithWordBoundary(, "jsx") {
if , := scanForPragmaArg(pragmaSkipSpaceFirst, .start++1, "jsx", ); {
.JSXFactoryPragmaComment =
}
} else if hasPrefixWithWordBoundary(, "jsxFrag") {
if , := scanForPragmaArg(pragmaSkipSpaceFirst, .start++1, "jsxFrag", ); {
.JSXFragmentPragmaComment =
}
} else if strings.HasPrefix(, " sourceMappingURL=") {
if , := scanForPragmaArg(pragmaNoSpaceFirst, .start++1, " sourceMappingURL=", ); {
.SourceMappingURL =
}
}
}
}
if || .PreserveAllCommentsBefore {
if {
= removeMultiLineCommentIndent(.source.Contents[:.start], )
}
.CommentsToPreserveBefore = append(.CommentsToPreserveBefore, js_ast.Comment{
Loc: logger.Loc{Start: int32(.start)},
Text: ,
})
}
}
:= 0
:
for len() > 0 {
, := utf8.DecodeLastRuneInString()
switch {
case '\r', '\n', '\u2028', '\u2029':
break
}
= [:len()-]
++
}
var []string
:= 0
for , := range {
switch {
if <= {
= append(, [:])
}
= + 1
for , := range [1:] {
:= 0
for , := range {
if !IsWhitespace() {
break
}
++
}
if > {
=
}
}
if := [+1]; >= 0xDC00 && <= 0xDFFF {
return true
}
}
}
}
return false
}
// StringToUTF16 converts a UTF-8 Go string to UTF-16 code units, encoding
// code points above U+FFFF as surrogate pairs.
func StringToUTF16(text string) []uint16 {
	decoded := []uint16{}
	for _, c := range text {
		if c <= 0xFFFF {
			decoded = append(decoded, uint16(c))
		} else {
			c -= 0x10000
			decoded = append(decoded, uint16(0xD800+((c>>10)&0x3FF)), uint16(0xDC00+(c&0x3FF)))
		}
	}
	return decoded
}
func ( []uint16) string {
:= make([]byte, utf8.UTFMax)
:= strings.Builder{}
:= len()
for := 0; < ; ++ {
:= rune([])
if utf16.IsSurrogate() && +1 < {
:= rune([+1])
= (-0xD800)<<10 | ( - 0xDC00) + 0x10000
++
}
:= encodeWTF8Rune(, )
.Write([:])
}
return .String()
}
return false
}
:= [utf8.UTFMax]byte{}
:= len()
:= 0
for := 0; < ; ++ {
:= rune([])
if utf16.IsSurrogate() && +1 < {
:= rune([+1])
= (-0xD800)<<10 | ( - 0xDC00) + 0x10000
++
}
:= encodeWTF8Rune([:], )
if + > len() {
return false
}
for := 0; < ; ++ {
if [] != [] {
return false
}
++
}
}
return == len()
}
// UTF16EqualsUTF16 reports whether two UTF-16 strings contain exactly the
// same code units (no normalization or surrogate combining).
func UTF16EqualsUTF16(a []uint16, b []uint16) bool {
	if len(a) == len(b) {
		for i, c := range a {
			if c != b[i] {
				return false
			}
		}
		return true
	}
	return false
}
switch := uint32(); {
case <= 0x7F:
[0] = byte()
return 1
case <= 0x7FF:
_ = [1] // eliminate bounds checks
[0] = 0xC0 | byte(>>6)
[1] = 0x80 | byte()&0x3F
return 2
case > utf8.MaxRune:
= utf8.RuneError
fallthrough
case <= 0xFFFF:
_ = [2] // eliminate bounds checks
[0] = 0xE0 | byte(>>12)
[1] = 0x80 | byte(>>6)&0x3F
[2] = 0x80 | byte()&0x3F
return 3
default:
_ = [3] // eliminate bounds checks
[0] = 0xF0 | byte(>>18)
[1] = 0x80 | byte(>>12)&0x3F
[2] = 0x80 | byte(>>6)&0x3F
[3] = 0x80 | byte()&0x3F
return 4
}
}
// DecodeWTF8Rune decodes the first code point of a WTF-8 string, returning
// the rune and its width in bytes. Unlike utf8.DecodeRuneInString it
// accepts encoded surrogate halves (the WTF-8 extension) but still rejects
// overlong encodings, truncated sequences (width 0), and values outside
// U+0000..U+10FFFF.
func DecodeWTF8Rune(s string) (rune, int) {
	n := len(s)
	if n < 1 {
		return utf8.RuneError, 0
	}

	s0 := s[0]
	if s0 < 0x80 {
		return rune(s0), 1
	}

	// Determine the sequence length from the leading byte
	var width int
	if (s0 & 0xE0) == 0xC0 {
		width = 2
	} else if (s0 & 0xF0) == 0xE0 {
		width = 3
	} else if (s0 & 0xF8) == 0xF0 {
		width = 4
	} else {
		return utf8.RuneError, 1
	}

	if n < width {
		return utf8.RuneError, 0
	}

	s1 := s[1]
	if (s1 & 0xC0) != 0x80 {
		return utf8.RuneError, 1
	}

	if width == 2 {
		cp := rune(s0&0x1F)<<6 | rune(s1&0x3F)
		if cp < 0x80 {
			return utf8.RuneError, 1 // overlong
		}
		return cp, 2
	}

	s2 := s[2]
	if (s2 & 0xC0) != 0x80 {
		return utf8.RuneError, 1
	}

	if width == 3 {
		cp := rune(s0&0x0F)<<12 | rune(s1&0x3F)<<6 | rune(s2&0x3F)
		if cp < 0x0800 {
			return utf8.RuneError, 1 // overlong
		}
		return cp, 3
	}

	s3 := s[3]
	if (s3 & 0xC0) != 0x80 {
		return utf8.RuneError, 1
	}

	cp := rune(s0&0x07)<<18 | rune(s1&0x3F)<<12 | rune(s2&0x3F)<<6 | rune(s3&0x3F)
	if cp < 0x010000 || cp > 0x10FFFF {
		return utf8.RuneError, 1 // overlong or out of range
	}
	return cp, 4
}
(QR code image)
The pages are generated with Golds v0.3.2-preview (GOOS=darwin, GOARCH=amd64). Golds is a Go 101 project developed by Tapir Liu. PRs and bug reports are welcome and can be submitted to the issue list. Please follow @Go100and1 (reachable from the QR code on the left) to get the latest news about Golds.