package js_lexer
// The lexer converts a source file to a stream of tokens. Unlike many
// compilers, esbuild does not run the lexer to completion before the parser is
// started. Instead, the lexer is called repeatedly by the parser as the parser
// parses the file. This is because many tokens are context-sensitive and need
// high-level information from the parser. Examples are regular expression
// literals and JSX elements.
//
// For efficiency, the text associated with textual tokens is stored in two
// separate ways depending on the token. Identifiers use UTF-8 encoding which
// allows them to be slices of the input file without allocating extra memory.
// Strings use UTF-16 encoding so they can represent unicode surrogates
// accurately.

import (
	"fmt"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf16"
	"unicode/utf8"

	"github.com/evanw/esbuild/internal/js_ast"
	"github.com/evanw/esbuild/internal/logger"
)

type T uint
If you add a new token, remember to add it to "tokenToString" too
"#!/usr/bin/env node"
Literals
	TNoSubstitutionTemplateLiteral // Contents are in lexer.StringLiteral ([]uint16)
	TNumericLiteral                // Contents are in lexer.Number (float64)
	TStringLiteral                 // Contents are in lexer.StringLiteral ([]uint16)
	TBigIntegerLiteral             // Contents are in lexer.Identifier (string)
Pseudo-literals
	TTemplateHead   // Contents are in lexer.StringLiteral ([]uint16)
	TTemplateMiddle // Contents are in lexer.StringLiteral ([]uint16)
	TTemplateTail   // Contents are in lexer.StringLiteral ([]uint16)
Class-private fields and methods
Identifiers
	TIdentifier     // Contents are in lexer.Identifier (string)
	TEscapedKeyword // A keyword that has been escaped as an identifer
Reserved words
	"break":      TBreak,
	"case":       TCase,
	"catch":      TCatch,
	"class":      TClass,
	"const":      TConst,
	"continue":   TContinue,
	"debugger":   TDebugger,
	"default":    TDefault,
	"delete":     TDelete,
	"do":         TDo,
	"else":       TElse,
	"enum":       TEnum,
	"export":     TExport,
	"extends":    TExtends,
	"false":      TFalse,
	"finally":    TFinally,
	"for":        TFor,
	"function":   TFunction,
	"if":         TIf,
	"import":     TImport,
	"in":         TIn,
	"instanceof": TInstanceof,
	"new":        TNew,
	"null":       TNull,
	"return":     TReturn,
	"super":      TSuper,
	"switch":     TSwitch,
	"this":       TThis,
	"throw":      TThrow,
	"true":       TTrue,
	"try":        TTry,
	"typeof":     TTypeof,
	"var":        TVar,
	"void":       TVoid,
	"while":      TWhile,
	"with":       TWith,
}

// StrictModeReservedWords is the set of identifiers that are reserved words
// only in strict-mode code (they are valid identifiers in sloppy mode).
var StrictModeReservedWords = map[string]bool{
	"implements": true,
	"interface":  true,
	"let":        true,
	"package":    true,
	"private":    true,
	"protected":  true,
	"public":     true,
	"static":     true,
	"yield":      true,
}

// json holds the JSON-specific lexing flags.
type json struct {
	parse         bool // true when lexing a JSON file instead of JavaScript
	allowComments bool // true when the JSON dialect permits comments (e.g. tsconfig.json)
}

type Lexer struct {
	log                             logger.Log
	source                          logger.Source
	current                         int
	start                           int
	end                             int
	ApproximateNewlineCount         int
	LegacyOctalLoc                  logger.Loc
	Token                           T
	HasNewlineBefore                bool
	HasPureCommentBefore            bool
	PreserveAllCommentsBefore       bool
	IsLegacyOctalLiteral            bool
	CommentsToPreserveBefore        []js_ast.Comment
	AllOriginalComments             []js_ast.Comment
	codePoint                       rune
	StringLiteral                   []uint16
	Identifier                      string
	JSXFactoryPragmaComment         js_ast.Span
	JSXFragmentPragmaComment        js_ast.Span
	SourceMappingURL                js_ast.Span
	Number                          float64
	rescanCloseBraceAsTemplateToken bool
	forGlobalName                   bool
	json                            json
	prevErrorLoc                    logger.Loc
The log is disabled during speculative scans that may backtrack
	IsLogDisabled bool
}

type LexerPanic struct{}

func ( logger.Log,  logger.Source) Lexer {
	 := Lexer{
		log:          ,
		source:       ,
		prevErrorLoc: logger.Loc{Start: -1},
	}
	.step()
	.Next()
	return 
}

func ( logger.Log,  logger.Source) Lexer {
	 := Lexer{
		log:           ,
		source:        ,
		prevErrorLoc:  logger.Loc{Start: -1},
		forGlobalName: true,
	}
	.step()
	.Next()
	return 
}

func ( logger.Log,  logger.Source,  bool) Lexer {
	 := Lexer{
		log:          ,
		source:       ,
		prevErrorLoc: logger.Loc{Start: -1},
		json: json{
			parse:         true,
			allowComments: ,
		},
	}
	.step()
	.Next()
	return 
}

func ( *Lexer) () logger.Loc {
	return logger.Loc{Start: int32(.start)}
}

func ( *Lexer) () logger.Range {
	return logger.Range{Loc: logger.Loc{Start: int32(.start)}, Len: int32(.end - .start)}
}

func ( *Lexer) () string {
	return .source.Contents[.start:.end]
}

func ( *Lexer) () string {
	var  string
	switch .Token {
"`x`" or "}x`"
		 = .source.Contents[.start+1 : .end-1]

"`x${" or "}x${"
		 = .source.Contents[.start+1 : .end-2]
	}

	if strings.IndexByte(, '\r') == -1 {
		return 
	}
From the specification: 11.8.6.1 Static Semantics: TV and TRV TV excludes the code units of LineContinuation while TRV includes them. <CR><LF> and <CR> LineTerminatorSequences are normalized to <LF> for both TV and TRV. An explicit EscapeSequence is needed to include a <CR> or <CR><LF> sequence.

	 := []byte()
	 := 0
	 := 0

	for  < len() {
		 := []
		++

Convert '\r\n' into '\n'
			if  < len() && [] == '\n' {
				++
			}
Convert '\r' into '\n'
			 = '\n'
		}

		[] = 
		++
	}

	return string([:])
}

func ( *Lexer) () bool {
	return .Token >= TIdentifier
}

func ( *Lexer) ( string) bool {
	return .Token == TIdentifier && .Raw() == 
}

func ( *Lexer) ( string) {
	if !.IsContextualKeyword() {
		.ExpectedString(fmt.Sprintf("%q", ))
	}
	.Next()
}

func ( *Lexer) () {
	 := logger.Loc{Start: int32(.end)}
	 := "Unexpected end of file"
	if .end < len(.source.Contents) {
		,  := utf8.DecodeRuneInString(.source.Contents[.end:])
		if  < 0x20 {
			 = fmt.Sprintf("Syntax error \"\\x%02X\"", )
		} else if  >= 0x80 {
			 = fmt.Sprintf("Syntax error \"\\u{%x}\"", )
		} else if  != '"' {
			 = fmt.Sprintf("Syntax error \"%c\"", )
		} else {
			 = "Syntax error '\"'"
		}
	}
	.addError(, )
	panic(LexerPanic{})
}

func ( *Lexer) ( string) {
	 := fmt.Sprintf("%q", .Raw())
	if .start == len(.source.Contents) {
		 = "end of file"
	}
	.addRangeError(.Range(), fmt.Sprintf("Expected %s but found %s", , ))
	panic(LexerPanic{})
}

func ( *Lexer) ( T) {
	if ,  := tokenToString[];  {
		.ExpectedString()
	} else {
		.Unexpected()
	}
}

func ( *Lexer) () {
	 := fmt.Sprintf("%q", .Raw())
	if .start == len(.source.Contents) {
		 = "end of file"
	}
	.addRangeError(.Range(), fmt.Sprintf("Unexpected %s", ))
	panic(LexerPanic{})
}

func ( *Lexer) ( T) {
	if .Token !=  {
		.Expected()
	}
	.Next()
}

func ( *Lexer) () {
	if .Token == TSemicolon || (!.HasNewlineBefore &&
		.Token != TCloseBrace && .Token != TEndOfFile) {
		.Expect(TSemicolon)
	}
}
This parses a single "<" token. If that is the first part of a longer token, this function splits off the first "<" and leaves the remainder of the current token as another, smaller token. For example, "<<=" becomes "<=".
func ( *Lexer) ( bool) {
	switch .Token {
	case TLessThan:
		if  {
			.NextInsideJSXElement()
		} else {
			.Next()
		}

	case TLessThanEquals:
		.Token = TEquals
		.start++

	case TLessThanLessThan:
		.Token = TLessThan
		.start++

	case TLessThanLessThanEquals:
		.Token = TLessThanEquals
		.start++

	default:
		.Expected(TLessThan)
	}
}
This parses a single ">" token. If that is the first part of a longer token, this function splits off the first ">" and leaves the remainder of the current token as another, smaller token. For example, ">>=" becomes ">=".
func ( *Lexer) ( bool) {
	switch .Token {
	case TGreaterThan:
		if  {
			.NextInsideJSXElement()
		} else {
			.Next()
		}

	case TGreaterThanEquals:
		.Token = TEquals
		.start++

	case TGreaterThanGreaterThan:
		.Token = TGreaterThan
		.start++

	case TGreaterThanGreaterThanEquals:
		.Token = TGreaterThanEquals
		.start++

	case TGreaterThanGreaterThanGreaterThan:
		.Token = TGreaterThanGreaterThan
		.start++

	case TGreaterThanGreaterThanGreaterThanEquals:
		.Token = TGreaterThanGreaterThanEquals
		.start++

	default:
		.Expected(TGreaterThan)
	}
}

func ( string) bool {
	if len() == 0 {
		return false
	}
	for ,  := range  {
		if  == 0 {
			if !IsIdentifierStart() {
				return false
			}
		} else {
			if !IsIdentifierContinue() {
				return false
			}
		}
	}
	return true
}

func ( string) string {
	if IsIdentifier() {
		return 
	}
	 := strings.Builder{}
Identifier start
	,  := utf8.DecodeRuneInString()
	 = [:]
	if IsIdentifierStart() {
		.WriteRune()
	} else {
		.WriteRune('_')
	}
Identifier continue
	for  != "" {
		,  := utf8.DecodeRuneInString()
		 = [:]
		if IsIdentifierContinue() {
			.WriteRune()
		} else {
			.WriteRune('_')
		}
	}

	return .String()
}
This does "IsIdentifier(UTF16ToString(text))" without any allocations
func ( []uint16) bool {
	 := len()
	if  == 0 {
		return false
	}
	for  := 0;  < ; ++ {
		 := rune([])
		if  >= 0xD800 &&  <= 0xDBFF && +1 <  {
			if  := rune([+1]);  >= 0xDC00 &&  <= 0xDFFF {
				 = ( << 10) +  + (0x10000 - (0xD800 << 10) - 0xDC00)
				++
			}
		}
		if  == 0 {
			if !IsIdentifierStart() {
				return false
			}
		} else {
			if !IsIdentifierContinue() {
				return false
			}
		}
	}
	return true
}

func ( rune) bool {
	switch  {
	case '_', '$',
		'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
		'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
		'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
		'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
		return true
	}
All ASCII identifier start code points are listed above
	if  < 0x7F {
		return false
	}

	return unicode.Is(idStart, )
}

func ( rune) bool {
	switch  {
	case '_', '$', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
		'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
		'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
		'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
		'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
		return true
	}
All ASCII identifier start code points are listed above
	if  < 0x7F {
		return false
	}
ZWNJ and ZWJ are allowed in identifiers
	if  == 0x200C ||  == 0x200D {
		return true
	}

	return unicode.Is(idContinue, )
}
// IsWhitespace reports whether the code point is ECMAScript whitespace. Line
// terminators ('\r', '\n', U+2028, U+2029) are deliberately not included —
// they are handled separately by the lexer. See the "White Space Code Points"
// table in the ECMAScript standard.
func IsWhitespace(codePoint rune) bool {
	switch codePoint {
	case
		'\u0009', // character tabulation
		'\u000B', // line tabulation
		'\u000C', // form feed
		'\u0020', // space
		'\u00A0', // no-break space

		// Unicode "Space_Separator" code points
		'\u1680', // ogham space mark
		'\u2000', // en quad
		'\u2001', // em quad
		'\u2002', // en space
		'\u2003', // em space
		'\u2004', // three-per-em space
		'\u2005', // four-per-em space
		'\u2006', // six-per-em space
		'\u2007', // figure space
		'\u2008', // punctuation space
		'\u2009', // thin space
		'\u200A', // hair space
		'\u202F', // narrow no-break space
		'\u205F', // medium mathematical space
		'\u3000', // ideographic space

		'\uFEFF': // zero width non-breaking space
		return true

	default:
		return false
	}
}

func ( logger.Source,  logger.Loc) logger.Range {
	 := .Contents[.Start:]
	if len() == 0 {
		return logger.Range{Loc: , Len: 0}
	}

	 := 0
	,  := utf8.DecodeRuneInString([:])
Handle private names
	if  == '#' {
		++
		, _ = utf8.DecodeRuneInString([:])
	}

Search for the end of the identifier
		for  < len() {
			,  := utf8.DecodeRuneInString([:])
			if  == '\\' {
				 += 
Skip over bracketed unicode escapes such as "\u{10000}"
				if +2 < len() && [] == 'u' && [+1] == '{' {
					 += 2
					for  < len() {
						if [] == '}' {
							++
							break
						}
						++
					}
				}
			} else if !IsIdentifierContinue() {
				return logger.Range{Loc: , Len: int32()}
			} else {
				 += 
			}
		}
	}
When minifying, this identifier may have originally been a string
	return .RangeOfString()
}

func ( *Lexer) ( T) {
	if .Token !=  {
		.Expected()
	}
	.NextJSXElementChild()
}

func ( *Lexer) () {
	.HasNewlineBefore = false
	 := .end

	for {
		.start = .end
		.Token = 0

		switch .codePoint {
		case -1: // This indicates the end of the file
			.Token = TEndOfFile

		case '{':
			.step()
			.Token = TOpenBrace

		case '<':
			.step()
			.Token = TLessThan

		default:
			 := false

		:
			for {
				switch .codePoint {
Reaching the end of the file without a closing element is an error
					.SyntaxError()

This needs fixing if it has an entity or if it's a multi-line string
					 = true
					.step()

Stop when the string ends
					break 

Non-ASCII strings need the slow path
					if .codePoint >= 0x80 {
						 = true
					}
					.step()
				}
			}

			.Token = TStringLiteral
			 := .source.Contents[:.end]

Skip this token if it turned out to be empty after trimming
				if len(.StringLiteral) == 0 {
					.HasNewlineBefore = true
					continue
				}
Fast path
				 := len()
				 := make([]uint16, )
				for  := 0;  < ; ++ {
					[] = uint16([])
				}
				.StringLiteral = 
			}
		}

		break
	}
}

func ( *Lexer) ( T) {
	if .Token !=  {
		.Expected()
	}
	.NextInsideJSXElement()
}

func ( *Lexer) () {
	.HasNewlineBefore = false

	for {
		.start = .end
		.Token = 0

		switch .codePoint {
		case -1: // This indicates the end of the file
			.Token = TEndOfFile

		case '\r', '\n', '\u2028', '\u2029':
			.step()
			.HasNewlineBefore = true
			continue

		case '\t', ' ':
			.step()
			continue

		case '.':
			.step()
			.Token = TDot

		case '=':
			.step()
			.Token = TEquals

		case '{':
			.step()
			.Token = TOpenBrace

		case '}':
			.step()
			.Token = TCloseBrace

		case '<':
			.step()
			.Token = TLessThan

		case '>':
			.step()
			.Token = TGreaterThan

'/' or '//' or ' ... '
			.step()
			switch .codePoint {
			case '/':
			:
				for {
					.step()
					switch .codePoint {
					case '\r', '\n', '\u2028', '\u2029':
						break 

					case -1: // This indicates the end of the file
						break 
					}
				}
				continue

			case '*':
				.step()
				 := .Range()
			:
				for {
					switch .codePoint {
					case '*':
						.step()
						if .codePoint == '/' {
							.step()
							break 
						}

					case '\r', '\n', '\u2028', '\u2029':
						.step()
						.HasNewlineBefore = true

					case -1: // This indicates the end of the file
						.start = .end
						.addErrorWithNotes(.Loc(), "Expected \"*/\" to terminate multi-line comment",
							[]logger.MsgData{logger.RangeData(&.source, , "The multi-line comment starts here")})
						panic(LexerPanic{})

					default:
						.step()
					}
				}
				continue

			default:
				.Token = TSlash
			}

		case '\'', '"':
			 := .codePoint
			 := false
			.step()

		:
			for {
				switch .codePoint {
				case -1: // This indicates the end of the file
					.SyntaxError()

				case '&':
					 = true
					.step()

				case :
					.step()
					break 

Non-ASCII strings need the slow path
					if .codePoint >= 0x80 {
						 = true
					}
					.step()
				}
			}

			.Token = TStringLiteral
			 := .source.Contents[.start+1 : .end-1]

Slow path
Fast path
				 := len()
				 := make([]uint16, )
				for  := 0;  < ; ++ {
					[] = uint16([])
				}
				.StringLiteral = 
			}

Check for unusual whitespace characters
			if IsWhitespace(.codePoint) {
				.step()
				continue
			}

			if IsIdentifierStart(.codePoint) {
				.step()
				for IsIdentifierContinue(.codePoint) || .codePoint == '-' {
					.step()
				}
Parse JSX namespaces. These are not supported by React or TypeScript but someone using JSX syntax in more obscure ways may find a use for them. A namespaced name is just always turned into a string so you can't use this feature to reference JavaScript identifiers.
				if .codePoint == ':' {
					.step()
					if IsIdentifierStart(.codePoint) {
						.step()
						for IsIdentifierContinue(.codePoint) || .codePoint == '-' {
							.step()
						}
					} else {
						.addError(logger.Loc{Start: .Range().End()},
							fmt.Sprintf("Expected identifier after %q in namespaced JSX name", .Raw()))
					}
				}

				.Identifier = .Raw()
				.Token = TIdentifier
				break
			}

			.end = .current
			.Token = TSyntaxError
		}

		return
	}
}

func ( *Lexer) () {
	.HasNewlineBefore = .end == 0
	.HasPureCommentBefore = false
	.CommentsToPreserveBefore = nil

	for {
		.start = .end
		.Token = 0

		switch .codePoint {
		case -1: // This indicates the end of the file
			.Token = TEndOfFile

		case '#':
"#!/usr/bin/env node"
				.Token = THashbang
			:
				for {
					.step()
					switch .codePoint {
					case '\r', '\n', '\u2028', '\u2029':
						break 

					case -1: // This indicates the end of the file
						break 
					}
				}
				.Identifier = .Raw()
"#foo"
				.step()
				if .codePoint == '\\' {
					.Identifier, _ = .scanIdentifierWithEscapes(privateIdentifier)
				} else {
					if !IsIdentifierStart(.codePoint) {
						.SyntaxError()
					}
					.step()
					for IsIdentifierContinue(.codePoint) {
						.step()
					}
					if .codePoint == '\\' {
						.Identifier, _ = .scanIdentifierWithEscapes(privateIdentifier)
					} else {
						.Identifier = .Raw()
					}
				}
				.Token = TPrivateIdentifier
			}

		case '\r', '\n', '\u2028', '\u2029':
			.step()
			.HasNewlineBefore = true
			continue

		case '\t', ' ':
			.step()
			continue

		case '(':
			.step()
			.Token = TOpenParen

		case ')':
			.step()
			.Token = TCloseParen

		case '[':
			.step()
			.Token = TOpenBracket

		case ']':
			.step()
			.Token = TCloseBracket

		case '{':
			.step()
			.Token = TOpenBrace

		case '}':
			.step()
			.Token = TCloseBrace

		case ',':
			.step()
			.Token = TComma

		case ':':
			.step()
			.Token = TColon

		case ';':
			.step()
			.Token = TSemicolon

		case '@':
			.step()
			.Token = TAt

		case '~':
			.step()
			.Token = TTilde

'?' or '?.' or '??' or '??='
			.step()
			switch .codePoint {
			case '?':
				.step()
				switch .codePoint {
				case '=':
					.step()
					.Token = TQuestionQuestionEquals
				default:
					.Token = TQuestionQuestion
				}
			case '.':
				.Token = TQuestion
				 := .current
				 := .source.Contents
Lookahead to disambiguate with 'a?.1:b'
				if  < len() {
					 := []
					if  < '0' ||  > '9' {
						.step()
						.Token = TQuestionDot
					}
				}
			default:
				.Token = TQuestion
			}

'%' or '%='
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TPercentEquals
			default:
				.Token = TPercent
			}

'&' or '&=' or '&&' or '&&='
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TAmpersandEquals
			case '&':
				.step()
				switch .codePoint {
				case '=':
					.step()
					.Token = TAmpersandAmpersandEquals
				default:
					.Token = TAmpersandAmpersand
				}
			default:
				.Token = TAmpersand
			}

'|' or '|=' or '||' or '||='
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TBarEquals
			case '|':
				.step()
				switch .codePoint {
				case '=':
					.step()
					.Token = TBarBarEquals
				default:
					.Token = TBarBar
				}
			default:
				.Token = TBar
			}

'^' or '^='
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TCaretEquals
			default:
				.Token = TCaret
			}

'+' or '+=' or '++'
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TPlusEquals
			case '+':
				.step()
				.Token = TPlusPlus
			default:
				.Token = TPlus
			}

'-' or '-=' or '--' or '-->'
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TMinusEquals
			case '-':
				.step()
Handle legacy HTML-style comments
				if .codePoint == '>' && .HasNewlineBefore {
					.step()
					.log.AddRangeWarning(&.source, .Range(),
						"Treating \"-->\" as the start of a legacy HTML single-line comment")
				:
					for {
						switch .codePoint {
						case '\r', '\n', '\u2028', '\u2029':
							break 

						case -1: // This indicates the end of the file
							break 
						}
						.step()
					}
					continue
				}

				.Token = TMinusMinus
			default:
				.Token = TMinus
			}

'*' or '*=' or '**' or '**='
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TAsteriskEquals

			case '*':
				.step()
				switch .codePoint {
				case '=':
					.step()
					.Token = TAsteriskAsteriskEquals

				default:
					.Token = TAsteriskAsterisk
				}

			default:
				.Token = TAsterisk
			}

'/' or '/=' or '//' or ' ... '
			.step()
			if .forGlobalName {
				.Token = TSlash
				break
			}
			switch .codePoint {
			case '=':
				.step()
				.Token = TSlashEquals
				break

			case '/':
			:
				for {
					.step()
					switch .codePoint {
					case '\r', '\n', '\u2028', '\u2029':
						break 

					case -1: // This indicates the end of the file
						break 
					}
				}
				if .json.parse && !.json.allowComments {
					.addRangeError(.Range(), "JSON does not support comments")
				}
				.scanCommentText()
				continue

			case '*':
				.step()
				 := .Range()
			:
				for {
					switch .codePoint {
					case '*':
						.step()
						if .codePoint == '/' {
							.step()
							break 
						}

					case '\r', '\n', '\u2028', '\u2029':
						.step()
						.HasNewlineBefore = true

					case -1: // This indicates the end of the file
						.start = .end
						.addErrorWithNotes(.Loc(), "Expected \"*/\" to terminate multi-line comment",
							[]logger.MsgData{logger.RangeData(&.source, , "The multi-line comment starts here")})
						panic(LexerPanic{})

					default:
						.step()
					}
				}
				if .json.parse && !.json.allowComments {
					.addRangeError(.Range(), "JSON does not support comments")
				}
				.scanCommentText()
				continue

			default:
				.Token = TSlash
			}

'=' or '=>' or '==' or '==='
			.step()
			switch .codePoint {
			case '>':
				.step()
				.Token = TEqualsGreaterThan
			case '=':
				.step()
				switch .codePoint {
				case '=':
					.step()
					.Token = TEqualsEqualsEquals
				default:
					.Token = TEqualsEquals
				}
			default:
				.Token = TEquals
			}

'<' or '<<' or '<=' or '<<=' or '<!--'
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TLessThanEquals
			case '<':
				.step()
				switch .codePoint {
				case '=':
					.step()
					.Token = TLessThanLessThanEquals
				default:
					.Token = TLessThanLessThan
				}
Handle legacy HTML-style comments
			case '!':
				if strings.HasPrefix(.source.Contents[.start:], "<!--") {
					.step()
					.step()
					.step()
					.log.AddRangeWarning(&.source, .Range(),
						"Treating \"<!--\" as the start of a legacy HTML single-line comment")
				:
					for {
						switch .codePoint {
						case '\r', '\n', '\u2028', '\u2029':
							break 

						case -1: // This indicates the end of the file
							break 
						}
						.step()
					}
					continue
				}

				.Token = TLessThan

			default:
				.Token = TLessThan
			}

'>' or '>>' or '>>>' or '>=' or '>>=' or '>>>='
			.step()
			switch .codePoint {
			case '=':
				.step()
				.Token = TGreaterThanEquals
			case '>':
				.step()
				switch .codePoint {
				case '=':
					.step()
					.Token = TGreaterThanGreaterThanEquals
				case '>':
					.step()
					switch .codePoint {
					case '=':
						.step()
						.Token = TGreaterThanGreaterThanGreaterThanEquals
					default:
						.Token = TGreaterThanGreaterThanGreaterThan
					}
				default:
					.Token = TGreaterThanGreaterThan
				}
			default:
				.Token = TGreaterThan
			}

'!' or '!=' or '!=='
			.step()
			switch .codePoint {
			case '=':
				.step()
				switch .codePoint {
				case '=':
					.step()
					.Token = TExclamationEqualsEquals
				default:
					.Token = TExclamationEquals
				}
			default:
				.Token = TExclamation
			}

		case '\'', '"', '`':
			 := .codePoint
			 := false
			 := 1

			if  != '`' {
				.Token = TStringLiteral
			} else if .rescanCloseBraceAsTemplateToken {
				.Token = TTemplateTail
			} else {
				.Token = TNoSubstitutionTemplateLiteral
			}
			.step()

		:
			for {
				switch .codePoint {
				case '\\':
					 = true
					.step()
Handle Windows CRLF
					if .codePoint == '\r' && !.json.parse {
						.step()
						if .codePoint == '\n' {
							.step()
						}
						continue
					}

				case -1: // This indicates the end of the file
					.addError(logger.Loc{Start: int32(.end)}, "Unterminated string literal")
					panic(LexerPanic{})

				case '\r':
					if  != '`' {
						.addError(logger.Loc{Start: int32(.end)}, "Unterminated string literal")
						panic(LexerPanic{})
					}
Template literals require newline normalization
					 = true

				case '\n':
					if  != '`' {
						.addError(logger.Loc{Start: int32(.end)}, "Unterminated string literal")
						panic(LexerPanic{})
					}

				case '$':
					if  == '`' {
						.step()
						if .codePoint == '{' {
							 = 2
							.step()
							if .rescanCloseBraceAsTemplateToken {
								.Token = TTemplateMiddle
							} else {
								.Token = TTemplateHead
							}
							break 
						}
						continue 
					}

				case :
					.step()
					break 

Non-ASCII strings need the slow path
					if .codePoint >= 0x80 {
						 = true
					} else if .json.parse && .codePoint < 0x20 {
						.SyntaxError()
					}
				}
				.step()
			}

			 := .source.Contents[.start+1 : .end-]

Slow path
				.StringLiteral = .decodeEscapeSequences(.start+1, )
Fast path
				 := len()
				 := make([]uint16, )
				for  := 0;  < ; ++ {
					[] = uint16([])
				}
				.StringLiteral = 
			}

			if  == '\'' && .json.parse {
				.addRangeError(.Range(), "JSON strings must use double quotes")
			}

		case '_', '$',
			'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
			'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
			'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
			'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
			.step()
			for IsIdentifierContinue(.codePoint) {
				.step()
			}
			if .codePoint == '\\' {
				.Identifier, .Token = .scanIdentifierWithEscapes(normalIdentifier)
			} else {
				 := .Raw()
				.Identifier = 
				.Token = Keywords[]
				if .Token == 0 {
					.Token = TIdentifier
				}
			}

		case '\\':
			.Identifier, .Token = .scanIdentifierWithEscapes(normalIdentifier)

		case '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			.parseNumericLiteralOrDot()

Check for unusual whitespace characters
			if IsWhitespace(.codePoint) {
				.step()
				continue
			}

			if IsIdentifierStart(.codePoint) {
				.step()
				for IsIdentifierContinue(.codePoint) {
					.step()
				}
				if .codePoint == '\\' {
					.Identifier, .Token = .scanIdentifierWithEscapes(normalIdentifier)
				} else {
					.Token = TIdentifier
					.Identifier = .Raw()
				}
				break
			}

			.end = .current
			.Token = TSyntaxError
		}

		return
	}
}

type identifierKind uint8

const (
	normalIdentifier identifierKind = iota
	privateIdentifier
)
This is an edge case that doesn't really exist in the wild, so it doesn't need to be as fast as possible.
First pass: scan over the identifier to see how long it is
Scan a unicode escape sequence. There is at least one because that's what caused us to get on this slow path in the first place.
		if .codePoint == '\\' {
			.step()
			if .codePoint != 'u' {
				.SyntaxError()
			}
			.step()
Variable-length
				.step()
				for .codePoint != '}' {
					switch .codePoint {
					case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
						'a', 'b', 'c', 'd', 'e', 'f',
						'A', 'B', 'C', 'D', 'E', 'F':
						.step()
					default:
						.SyntaxError()
					}
				}
				.step()
Fixed-length
				for  := 0;  < 4; ++ {
					switch .codePoint {
					case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
						'a', 'b', 'c', 'd', 'e', 'f',
						'A', 'B', 'C', 'D', 'E', 'F':
						.step()
					default:
						.SyntaxError()
					}
				}
			}
			continue
		}
Stop when we reach the end of the identifier
		if !IsIdentifierContinue(.codePoint) {
			break
		}
		.step()
	}
Second pass: re-use our existing escape sequence parser
	 := string(utf16.Decode(.decodeEscapeSequences(.start, .Raw())))
Even though it was escaped, it must still be a valid identifier
	 := 
	if  == privateIdentifier {
		 = [1:] // Skip over the "#"
	}
	if !IsIdentifier() {
		.addRangeError(logger.Range{Loc: logger.Loc{Start: int32(.start)}, Len: int32(.end - .start)},
			fmt.Sprintf("Invalid identifier: %q", ))
	}
Escaped keywords are not allowed to work as actual keywords, but they are allowed wherever we allow identifiers or keywords. For example: // This is an error (equivalent to "var var;") var \u0076\u0061\u0072; // This is an error (equivalent to "var foo;" except for this rule) \u0076\u0061\u0072 foo; // This is an fine (equivalent to "foo.var;") foo.\u0076\u0061\u0072;
	if Keywords[] != 0 {
		return , TEscapedKeyword
	} else {
		return , TIdentifier
	}
}

Number or dot
	 := .codePoint
	.step()
Dot without a digit after it
"..."
		if .codePoint == '.' &&
			.current < len(.source.Contents) &&
			.source.Contents[.current] == '.' {
			.step()
			.step()
			.Token = TDotDotDot
			return
		}
"."
		.Token = TDot
		return
	}

	 := 0
	 := 0
	 :=  == '.'
	 := 0.0
	.IsLegacyOctalLiteral = false
Assume this is a number, but potentially change to a bigint later
Check for binary, octal, or hexadecimal literal
	if  == '0' {
		switch .codePoint {
		case 'b', 'B':
			 = 2

		case 'o', 'O':
			 = 8

		case 'x', 'X':
			 = 16

		case '0', '1', '2', '3', '4', '5', '6', '7', '_':
			 = 8
			.IsLegacyOctalLiteral = true
		}
	}

Integer literal
		 := true
		 := false
		.Number = 0
		if !.IsLegacyOctalLiteral {
			.step()
		}

	:
		for {
			switch .codePoint {
Cannot have multiple underscores in a row
				if  > 0 && .end == +1 {
					.SyntaxError()
				}
The first digit must exist
				if  || .IsLegacyOctalLiteral {
					.SyntaxError()
				}

				 = .end
				++

			case '0', '1':
				.Number = .Number* + float64(.codePoint-'0')

			case '2', '3', '4', '5', '6', '7':
				if  == 2 {
					.SyntaxError()
				}
				.Number = .Number* + float64(.codePoint-'0')

			case '8', '9':
				if .IsLegacyOctalLiteral {
					 = true
				} else if  < 10 {
					.SyntaxError()
				}
				.Number = .Number* + float64(.codePoint-'0')

			case 'A', 'B', 'C', 'D', 'E', 'F':
				if  != 16 {
					.SyntaxError()
				}
				.Number = .Number* + float64(.codePoint+10-'A')

			case 'a', 'b', 'c', 'd', 'e', 'f':
				if  != 16 {
					.SyntaxError()
				}
				.Number = .Number* + float64(.codePoint+10-'a')

The first digit must exist
				if  {
					.SyntaxError()
				}

				break 
			}

			.step()
			 = false
		}

		 := .codePoint == 'n' && !
Slow path: do we need to re-scan the input as text?
		if  ||  {
			 := .Raw()
Can't use a leading zero for bigint literals
			if  && .IsLegacyOctalLiteral {
				.SyntaxError()
			}
Filter out underscores
			if  > 0 {
				 := make([]byte, 0, len()-)
				for  := 0;  < len(); ++ {
					 := []
					if  != '_' {
						 = append(, )
					}
				}
				 = string()
			}
Store bigints as text to avoid precision loss
			if  {
				.Identifier = 
Legacy octal literals may turn out to be a base 10 literal after all
				,  := strconv.ParseFloat(, 64)
				.Number = 
			}
		}
Floating-point literal
		 :=  == '0' && (.codePoint == '8' || .codePoint == '9')
Initial digits
		for {
			if .codePoint < '0' || .codePoint > '9' {
				if .codePoint != '_' {
					break
				}
Cannot have multiple underscores in a row
				if  > 0 && .end == +1 {
					.SyntaxError()
				}
The specification forbids underscores in this case
				if  {
					.SyntaxError()
				}

				 = .end
				++
			}
			.step()
		}
Fractional digits
An underscore must not come last
			if  > 0 && .end == +1 {
				.end--
				.SyntaxError()
			}

			 = true
			.step()
			if .codePoint == '_' {
				.SyntaxError()
			}
			for {
				if .codePoint < '0' || .codePoint > '9' {
					if .codePoint != '_' {
						break
					}
Cannot have multiple underscores in a row
					if  > 0 && .end == +1 {
						.SyntaxError()
					}

					 = .end
					++
				}
				.step()
			}
		}
Exponent
An underscore must not come last
			if  > 0 && .end == +1 {
				.end--
				.SyntaxError()
			}

			 = true
			.step()
			if .codePoint == '+' || .codePoint == '-' {
				.step()
			}
			if .codePoint < '0' || .codePoint > '9' {
				.SyntaxError()
			}
			for {
				if .codePoint < '0' || .codePoint > '9' {
					if .codePoint != '_' {
						break
					}
Cannot have multiple underscores in a row
					if  > 0 && .end == +1 {
						.SyntaxError()
					}

					 = .end
					++
				}
				.step()
			}
		}
Take a slice of the text to parse
		 := .Raw()
Filter out underscores
		if  > 0 {
			 := make([]byte, 0, len()-)
			for  := 0;  < len(); ++ {
				 := []
				if  != '_' {
					 = append(, )
				}
			}
			 = string()
		}

The only bigint literal that can start with 0 is "0n"
			if len() > 1 &&  == '0' {
				.SyntaxError()
			}
Store bigints as text to avoid precision loss
			.Identifier = 
Parse a 32-bit integer (very fast path)
			var  uint32 = 0
			for ,  := range  {
				 = *10 + uint32(-'0')
			}
			.Number = float64()
Parse a double-precision floating-point number
			,  := strconv.ParseFloat(, 64)
			.Number = 
		}
	}
An underscore must not come last
	if  > 0 && .end == +1 {
		.end--
		.SyntaxError()
	}
Handle bigint literals after the underscore-at-end check above
	if .codePoint == 'n' && ! {
		.Token = TBigIntegerLiteral
		.step()
	}
Identifiers can't occur immediately after numbers
	if IsIdentifierStart(.codePoint) {
		.SyntaxError()
	}
}

func ( *Lexer) () {
	 := func() {
		if .codePoint == '\\' {
			.step()
		}

		switch .codePoint {
Newlines aren't allowed in regular expressions
			.SyntaxError()

		case -1: // This indicates the end of the file
			.SyntaxError()

		default:
			.step()
		}
	}

	for {
		switch .codePoint {
		case '/':
			.step()
			for IsIdentifierContinue(.codePoint) {
				switch .codePoint {
				case 'g', 'i', 'm', 's', 'u', 'y':
					.step()

				default:
					.SyntaxError()
				}
			}
			return

		case '[':
			.step()
			for .codePoint != ']' {
				()
			}
			.step()

		default:
			()
		}
	}
}

func ( []uint16,  string) []uint16 {
	 := 0

	for  < len() {
		,  := utf8.DecodeRuneInString([:])
		 += 

		if  == '&' {
			 := strings.IndexByte([:], ';')
			if  > 0 {
				 := [ : +]
				if [0] == '#' {
					 := [1:]
					 := 10
					if len() > 1 && [0] == 'x' {
						 = [1:]
						 = 16
					}
					if ,  := strconv.ParseInt(, , 32);  == nil {
						 = rune()
						 +=  + 1
					}
				} else if ,  := jsxEntity[];  {
					 = 
					 +=  + 1
				}
			}
		}

		if  <= 0xFFFF {
			 = append(, uint16())
		} else {
			 -= 0x10000
			 = append(, uint16(0xD800+((>>10)&0x3FF)), uint16(0xDC00+(&0x3FF)))
		}
	}

	return 
}

func ( string) []uint16 {
	 := -1
	 := []uint16{}
	 := 0
Trim whitespace off the end of the first line
	 := 0
Split into lines
	for  < len() {
		,  := utf8.DecodeRuneInString([:])

		switch  {
Newline
			if  != -1 &&  != -1 {
				if len() > 0 {
					 = append(, ' ')
				}
Trim whitespace off the start and end of lines in the middle
				 = decodeJSXEntities(, [:])
			}
Reset for the next line
			 = -1

Whitespace

Check for unusual whitespace characters
			if !IsWhitespace() {
				 =  + 
				if  == -1 {
					 = 
				}
			}
		}

		 += 
	}

	if  != -1 {
		if len() > 0 {
			 = append(, ' ')
		}
Trim whitespace off the start of the last line
		 = decodeJSXEntities(, [:])
	}

	return 
}

func ( *Lexer) ( int,  string) []uint16 {
	 := []uint16{}
	 := 0

	for  < len() {
		,  := utf8.DecodeRuneInString([:])
		 += 

		switch  {
From the specification: 11.8.6.1 Static Semantics: TV and TRV TV excludes the code units of LineContinuation while TRV includes them. <CR><LF> and <CR> LineTerminatorSequences are normalized to <LF> for both TV and TRV. An explicit EscapeSequence is needed to include a <CR> or <CR><LF> sequence.
Convert '\r\n' into '\n'
			if  < len() && [] == '\n' {
				++
			}
Convert '\r' into '\n'
			 = append(, '\n')
			continue

		case '\\':
			,  := utf8.DecodeRuneInString([:])
			 += 

			switch  {
			case 'b':
				 = append(, '\b')
				continue

			case 'f':
				 = append(, '\f')
				continue

			case 'n':
				 = append(, '\n')
				continue

			case 'r':
				 = append(, '\r')
				continue

			case 't':
				 = append(, '\t')
				continue

			case 'v':
				if .json.parse {
					.end =  +  - 
					.SyntaxError()
				}

				 = append(, '\v')
				continue

			case '0', '1', '2', '3', '4', '5', '6', '7':
				 :=  - 2
				if .json.parse {
					.end =  +  - 
					.SyntaxError()
				}
1-3 digit octal
				 := false
				 :=  - '0'
				,  := utf8.DecodeRuneInString([:])
				switch  {
				case '0', '1', '2', '3', '4', '5', '6', '7':
					 = *8 +  - '0'
					 += 
					,  := utf8.DecodeRuneInString([:])
					switch  {
					case '0', '1', '2', '3', '4', '5', '6', '7':
						 := *8 +  - '0'
						if  < 256 {
							 = 
							 += 
						}
					case '8', '9':
						 = true
					}
				case '8', '9':
					 = true
				}
				 = 
Forbid the use of octal literals other than "\0"
				if  || [:] != "\\0" {
					.LegacyOctalLoc = logger.Loc{Start: int32( + )}
				}

			case '8', '9':
				 = 
Forbid the invalid octal literals "\8" and "\9"
				.LegacyOctalLoc = logger.Loc{Start: int32( +  - 2)}

			case 'x':
				if .json.parse {
					.end =  +  - 
					.SyntaxError()
				}
2-digit hexadecimal
				 := '\000'
				for  := 0;  < 2; ++ {
					,  := utf8.DecodeRuneInString([:])
					 += 
					switch  {
					case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
						 = *16 | ( - '0')
					case 'a', 'b', 'c', 'd', 'e', 'f':
						 = *16 | ( + 10 - 'a')
					case 'A', 'B', 'C', 'D', 'E', 'F':
						 = *16 | ( + 10 - 'A')
					default:
						.end =  +  - 
						.SyntaxError()
					}
				}
				 = 

Unicode
				 := '\000'
Check the first character
				,  := utf8.DecodeRuneInString([:])
				 += 

				if  == '{' {
					if .json.parse {
						.end =  +  - 
						.SyntaxError()
					}
Variable-length
					 :=  -  -  - 
					 := true
					 := false
				:
					for {
						,  = utf8.DecodeRuneInString([:])
						 += 

						switch  {
						case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
							 = *16 | ( - '0')
						case 'a', 'b', 'c', 'd', 'e', 'f':
							 = *16 | ( + 10 - 'a')
						case 'A', 'B', 'C', 'D', 'E', 'F':
							 = *16 | ( + 10 - 'A')
						case '}':
							if  {
								.end =  +  - 
								.SyntaxError()
							}
							break 
						default:
							.end =  +  - 
							.SyntaxError()
						}

						if  > utf8.MaxRune {
							 = true
						}

						 = false
					}

					if  {
						.addRangeError(logger.Range{Loc: logger.Loc{Start: int32( + )}, Len: int32( - )},
							"Unicode escape sequence is out of range")
						panic(LexerPanic{})
					}
Fixed-length
					for  := 0;  < 4; ++ {
						switch  {
						case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
							 = *16 | ( - '0')
						case 'a', 'b', 'c', 'd', 'e', 'f':
							 = *16 | ( + 10 - 'a')
						case 'A', 'B', 'C', 'D', 'E', 'F':
							 = *16 | ( + 10 - 'A')
						default:
							.end =  +  - 
							.SyntaxError()
						}

						if  < 3 {
							,  = utf8.DecodeRuneInString([:])
							 += 
						}
					}
				}
				 = 

			case '\r':
				if .json.parse {
					.end =  +  - 
					.SyntaxError()
				}
Ignore line continuations. A line continuation is not an escaped newline.
Make sure Windows CRLF counts as a single newline
					++
				}
				continue

			case '\n', '\u2028', '\u2029':
				if .json.parse {
					.end =  +  - 
					.SyntaxError()
				}
Ignore line continuations. A line continuation is not an escaped newline.
				continue

			default:
				if .json.parse {
					switch  {
					case '"', '\\', '/':

					default:
						.end =  +  - 
						.SyntaxError()
					}
				}

				 = 
			}
		}

		if  <= 0xFFFF {
			 = append(, uint16())
		} else {
			 -= 0x10000
			 = append(, uint16(0xD800+((>>10)&0x3FF)), uint16(0xDC00+(&0x3FF)))
		}
	}

	return 
}

func ( *Lexer) () {
	if .Token != TCloseBrace {
		.Expected(TCloseBrace)
	}

	.rescanCloseBraceAsTemplateToken = true
	.codePoint = '`'
	.current = .end
	.end -= 1
	.Next()
	.rescanCloseBraceAsTemplateToken = false
}

func ( *Lexer) () {
	,  := utf8.DecodeRuneInString(.source.Contents[.current:])
Use -1 to indicate the end of the file
	if  == 0 {
		 = -1
	}
Track the approximate number of newlines in the file so we can preallocate the line offset table in the printer for source maps. The line offset table is the #1 highest allocation in the heap profile, so this is worth doing. This count is approximate because it handles "\n" and "\r\n" (the common cases) but not "\r" or "\u2028" or "\u2029". Getting this wrong is harmless because it's only a preallocation. The array will just grow if it's too small.
	if  == '\n' {
		.ApproximateNewlineCount++
	}

	.codePoint = 
	.end = .current
	.current += 
}

Don't report multiple errors in the same spot
	if  == .prevErrorLoc {
		return
	}
	.prevErrorLoc = 

	if !.IsLogDisabled {
		.log.AddError(&.source, , )
	}
}

Don't report multiple errors in the same spot
	if  == .prevErrorLoc {
		return
	}
	.prevErrorLoc = 

	if !.IsLogDisabled {
		.log.AddErrorWithNotes(&.source, , , )
	}
}

Don't report multiple errors in the same spot
	if .Loc == .prevErrorLoc {
		return
	}
	.prevErrorLoc = .Loc

	if !.IsLogDisabled {
		.log.AddRangeError(&.source, , )
	}
}

func ( string,  string) bool {
	 := len()
	 := len()
	if  >=  && [0:] ==  {
		if  ==  {
			return true
		}
		,  := utf8.DecodeRuneInString([:])
		if !IsIdentifierContinue() {
			return true
		}
	}
	return false
}

// pragmaArg controls how scanForPragmaArg treats whitespace between a pragma
// name (e.g. "@jsx" or "sourceMappingURL=") and its argument.
type pragmaArg uint8

const (
	// pragmaNoSpaceFirst requires the argument to start immediately
	pragmaNoSpaceFirst pragmaArg = iota
	// pragmaSkipSpaceFirst requires and skips leading whitespace first
	pragmaSkipSpaceFirst
)

func ( pragmaArg,  int,  string,  string) (js_ast.Span, bool) {
	 = [len():]
	 += len()

	if  == "" {
		return js_ast.Span{}, false
	}
One or more whitespace characters
	,  := utf8.DecodeRuneInString()
	if  == pragmaSkipSpaceFirst {
		if !IsWhitespace() {
			return js_ast.Span{}, false
		}
		for IsWhitespace() {
			 = [:]
			 += 
			if  == "" {
				return js_ast.Span{}, false
			}
			,  = utf8.DecodeRuneInString()
		}
	}
One or more non-whitespace characters
	 := 0
	for !IsWhitespace() {
		 += 
		if  >= len() {
			break
		}
		,  = utf8.DecodeRuneInString([:])
		if IsWhitespace() {
			break
		}
	}

	return js_ast.Span{
		Text: [:],
		Range: logger.Range{
			Loc: logger.Loc{Start: int32()},
			Len: int32(),
		},
	}, true
}

func ( *Lexer) () {
	 := .source.Contents[.start:.end]
	 := len() > 2 && [2] == '!'
	 := [1] == '*'
Save the original comment text so we can subtract comments from the character frequency analysis used by symbol minification
Omit the trailing "" from the checks below
	 := len()
	if  {
		 -= 2
	}

	for ,  := 0, len();  < ; ++ {
		switch [] {
		case '#':
			 := [+1 : ]
			if hasPrefixWithWordBoundary(, "__PURE__") {
				.HasPureCommentBefore = true
			} else if strings.HasPrefix(, " sourceMappingURL=") {
				if ,  := scanForPragmaArg(pragmaNoSpaceFirst, .start++1, " sourceMappingURL=", );  {
					.SourceMappingURL = 
				}
			}

		case '@':
			 := [+1 : ]
			if hasPrefixWithWordBoundary(, "__PURE__") {
				.HasPureCommentBefore = true
			} else if hasPrefixWithWordBoundary(, "preserve") || hasPrefixWithWordBoundary(, "license") {
				 = true
			} else if hasPrefixWithWordBoundary(, "jsx") {
				if ,  := scanForPragmaArg(pragmaSkipSpaceFirst, .start++1, "jsx", );  {
					.JSXFactoryPragmaComment = 
				}
			} else if hasPrefixWithWordBoundary(, "jsxFrag") {
				if ,  := scanForPragmaArg(pragmaSkipSpaceFirst, .start++1, "jsxFrag", );  {
					.JSXFragmentPragmaComment = 
				}
			} else if strings.HasPrefix(, " sourceMappingURL=") {
				if ,  := scanForPragmaArg(pragmaNoSpaceFirst, .start++1, " sourceMappingURL=", );  {
					.SourceMappingURL = 
				}
			}
		}
	}

	if  || .PreserveAllCommentsBefore {
		if  {
			 = removeMultiLineCommentIndent(.source.Contents[:.start], )
		}

		.CommentsToPreserveBefore = append(.CommentsToPreserveBefore, js_ast.Comment{
			Loc:  logger.Loc{Start: int32(.start)},
			Text: ,
		})
	}
}

Figure out the initial indent
	 := 0
:
	for len() > 0 {
		,  := utf8.DecodeLastRuneInString()
		switch  {
		case '\r', '\n', '\u2028', '\u2029':
			break 
		}
		 = [:len()-]
		++
	}
Split the comment into lines
	var  []string
	 := 0
	for ,  := range  {
		switch  {
Don't double-append for Windows style "\r\n" newlines
			if  <=  {
				 = append(, [:])
			}

			 =  + 1
Ignore the second part of Windows style "\r\n" newlines
			if  == '\r' &&  < len() && [] == '\n' {
				++
			}

		case '\u2028', '\u2029':
			 = append(, [:])
			 =  + 3
		}
	}
	 = append(, [:])
Find the minimum indent over all lines after the first line
	for ,  := range [1:] {
		 := 0
		for ,  := range  {
			if !IsWhitespace() {
				break
			}
			++
		}
		if  >  {
			 = 
		}
	}
Trim the indent off of all lines after the first line
	for ,  := range  {
		if  > 0 {
			[] = [:]
		}
	}
	return strings.Join(, "\n")
}

// ContainsNonBMPCodePoint returns true if the string contains a code point
// outside the Basic Multilingual Plane (i.e. above U+FFFF), which requires a
// surrogate pair in UTF-16.
func ContainsNonBMPCodePoint(text string) bool {
	for _, c := range text {
		if c > 0xFFFF {
			return true
		}
	}
	return false
}
// ContainsNonBMPCodePointUTF16 does "ContainsNonBMPCodePoint(UTF16ToString(text))"
// without any allocations by scanning for a valid surrogate pair directly.
func ContainsNonBMPCodePointUTF16(text []uint16) bool {
	if n := len(text); n > 0 {
		for i, c := range text[:n-1] {
			// Check for a high surrogate
			if c >= 0xD800 && c <= 0xDBFF {
				// Check for a low surrogate
				if c2 := text[i+1]; c2 >= 0xDC00 && c2 <= 0xDFFF {
					return true
				}
			}
		}
	}
	return false
}

// StringToUTF16 converts a UTF-8 Go string into a UTF-16 encoded slice,
// encoding code points above the BMP as surrogate pairs.
func StringToUTF16(text string) []uint16 {
	decoded := []uint16{}
	for _, c := range text {
		if c <= 0xFFFF {
			decoded = append(decoded, uint16(c))
		} else {
			c -= 0x10000
			decoded = append(decoded, uint16(0xD800+((c>>10)&0x3FF)), uint16(0xDC00+(c&0x3FF)))
		}
	}
	return decoded
}

func ( []uint16) string {
	 := make([]byte, utf8.UTFMax)
	 := strings.Builder{}
	 := len()
	for  := 0;  < ; ++ {
		 := rune([])
		if utf16.IsSurrogate() && +1 <  {
			 := rune([+1])
			 = (-0xD800)<<10 | ( - 0xDC00) + 0x10000
			++
		}
		 := encodeWTF8Rune(, )
		.Write([:])
	}
	return .String()
}
Does "UTF16ToString(text) == str" without a temporary allocation
func ( []uint16,  string) bool {
Strings can't be equal if UTF-16 encoding is longer than UTF-8 encoding
		return false
	}
	 := [utf8.UTFMax]byte{}
	 := len()
	 := 0
	for  := 0;  < ; ++ {
		 := rune([])
		if utf16.IsSurrogate() && +1 <  {
			 := rune([+1])
			 = (-0xD800)<<10 | ( - 0xDC00) + 0x10000
			++
		}
		 := encodeWTF8Rune([:], )
		if + > len() {
			return false
		}
		for  := 0;  < ; ++ {
			if [] != [] {
				return false
			}
			++
		}
	}
	return  == len()
}

// UTF16EqualsUTF16 returns true if the two UTF-16 slices contain the same
// code units in the same order.
func UTF16EqualsUTF16(a []uint16, b []uint16) bool {
	if len(a) == len(b) {
		for i, c := range a {
			if c != b[i] {
				return false
			}
		}
		return true
	}
	return false
}
// This is a clone of "utf8.EncodeRune" that has been modified to encode using
// WTF-8 instead. See https://simonsapin.github.io/wtf-8/ for more info. Unlike
// UTF-8, WTF-8 encodes unpaired surrogates (U+D800..U+DFFF) as themselves.
// It writes the encoding of "r" into "p" and returns the number of bytes.
func encodeWTF8Rune(p []byte, r rune) int {
	// Negative values are erroneous. Making it unsigned addresses the problem.
	switch i := uint32(r); {
	case i <= 0x7F:
		p[0] = byte(r)
		return 1
	case i <= 0x7FF:
		_ = p[1] // eliminate bounds checks
		p[0] = 0xC0 | byte(r>>6)
		p[1] = 0x80 | byte(r)&0x3F
		return 2
	case i > utf8.MaxRune:
		r = utf8.RuneError
		fallthrough
	case i <= 0xFFFF:
		_ = p[2] // eliminate bounds checks
		p[0] = 0xE0 | byte(r>>12)
		p[1] = 0x80 | byte(r>>6)&0x3F
		p[2] = 0x80 | byte(r)&0x3F
		return 3
	default:
		_ = p[3] // eliminate bounds checks
		p[0] = 0xF0 | byte(r>>18)
		p[1] = 0x80 | byte(r>>12)&0x3F
		p[2] = 0x80 | byte(r>>6)&0x3F
		p[3] = 0x80 | byte(r)&0x3F
		return 4
	}
}
This is a clone of "utf8.DecodeRuneInString" that has been modified to decode using WTF-8 instead. See https://simonsapin.github.io/wtf-8/ for more info.
func ( string) (rune, int) {
	 := len()
	if  < 1 {
		return utf8.RuneError, 0
	}

	 := [0]
	if  < 0x80 {
		return rune(), 1
	}

	var  int
	if ( & 0xE0) == 0xC0 {
		 = 2
	} else if ( & 0xF0) == 0xE0 {
		 = 3
	} else if ( & 0xF8) == 0xF0 {
		 = 4
	} else {
		return utf8.RuneError, 1
	}

	if  <  {
		return utf8.RuneError, 0
	}

	 := [1]
	if ( & 0xC0) != 0x80 {
		return utf8.RuneError, 1
	}

	if  == 2 {
		 := rune(&0x1F)<<6 | rune(&0x3F)
		if  < 0x80 {
			return utf8.RuneError, 1
		}
		return , 2
	}
	 := [2]

	if ( & 0xC0) != 0x80 {
		return utf8.RuneError, 1
	}

	if  == 3 {
		 := rune(&0x0F)<<12 | rune(&0x3F)<<6 | rune(&0x3F)
		if  < 0x0800 {
			return utf8.RuneError, 1
		}
		return , 3
	}
	 := [3]

	if ( & 0xC0) != 0x80 {
		return utf8.RuneError, 1
	}

	 := rune(&0x07)<<18 | rune(&0x3F)<<12 | rune(&0x3F)<<6 | rune(&0x3F)
	if  < 0x010000 ||  > 0x10FFFF {
		return utf8.RuneError, 1
	}
	return , 4