// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package modfile

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"
)
// A Position describes an arbitrary source position in a file,
// including the file, line, column, and byte offset.
type Position struct {
	Line     int // line in input (starting at 1)
	LineRune int // rune in line (starting at 1)
	Byte     int // byte in input (starting at 0)
}

// add returns the position at the end of s, assuming it starts at p.
func (p Position) add(s string) Position {
	p.Byte += len(s)
	if n := strings.Count(s, "\n"); n > 0 {
		p.Line += n
		// Restart column counting from the start of the last line in s.
		s = s[strings.LastIndex(s, "\n")+1:]
		p.LineRune = 1
	}
	p.LineRune += utf8.RuneCountInString(s)
	return p
}
An Expr represents an input element.
Span returns the start and end position of the expression, excluding leading or trailing comments.
	Span() (start, end Position)
Comment returns the comments attached to the expression. This method would normally be named 'Comments' but that would interfere with embedding a type of the same name.
	Comment() *Comments
}
A Comment represents a single // comment.
type Comment struct {
	Start  Position
	Token  string // without trailing newline
	Suffix bool   // an end of line (not whole line) comment
}
Comments collects the comments associated with an expression.
type Comments struct {
	Before []Comment // whole-line comments before this expression
	Suffix []Comment // end-of-line comments after this expression
For top-level expressions only, After lists whole-line comments following the expression.
Comment returns the receiver. This isn't useful by itself, but a Comments struct is embedded into all the expression implementation types, and this gives each of those a Comment method to satisfy the Expr interface.
func ( *Comments) () *Comments {
	return 
}
A FileSyntax represents an entire go.mod file.
type FileSyntax struct {
	Name string // file path
	Comments
	Stmt []Expr
}

func ( *FileSyntax) () (,  Position) {
	if len(.Stmt) == 0 {
		return
	}
	, _ = .Stmt[0].Span()
	_,  = .Stmt[len(.Stmt)-1].Span()
	return , 
}
addLine adds a line containing the given tokens to the file. If the first token of the hint matches the first token of the line, the new line is added at the end of the block containing hint, extracting hint into a new block if it is not yet in one. If the hint is non-nil buts its first token does not match, the new line is added after the block containing hint (or hint itself, if not in a block). If no hint is provided, addLine appends the line to the end of the last block with a matching first token, or to the end of the file if no such block exists.
func ( *FileSyntax) ( Expr,  ...string) *Line {
If no hint given, add to the last statement of the given type.
	:
		for  := len(.Stmt) - 1;  >= 0; -- {
			 := .Stmt[]
			switch stmt := .(type) {
			case *Line:
				if .Token != nil && .Token[0] == [0] {
					 = 
					break 
				}
			case *LineBlock:
				if .Token[0] == [0] {
					 = 
					break 
				}
			}
		}
	}

	 := func( int) *Line {
		 := &Line{Token: }
		if  == len(.Stmt) {
			.Stmt = append(.Stmt, )
		} else {
			.Stmt = append(.Stmt, nil)
			copy(.Stmt[+2:], .Stmt[+1:])
			.Stmt[+1] = 
		}
		return 
	}

	if  != nil {
		for ,  := range .Stmt {
			switch stmt := .(type) {
			case *Line:
				if  ==  {
					if .Token == nil || .Token[0] != [0] {
						return ()
					}
Convert line to line block.
					.InBlock = true
					 := &LineBlock{Token: .Token[:1], Line: []*Line{}}
					.Token = .Token[1:]
					.Stmt[] = 
					 := &Line{Token: [1:], InBlock: true}
					.Line = append(.Line, )
					return 
				}

			case *LineBlock:
				if  ==  {
					if .Token[0] != [0] {
						return ()
					}

					 := &Line{Token: [1:], InBlock: true}
					.Line = append(.Line, )
					return 
				}

				for ,  := range .Line {
					if  ==  {
						if .Token[0] != [0] {
							return ()
						}
Add new line after hint within the block.
						.Line = append(.Line, nil)
						copy(.Line[+2:], .Line[+1:])
						 := &Line{Token: [1:], InBlock: true}
						.Line[+1] = 
						return 
					}
				}
			}
		}
	}

	 := &Line{Token: }
	.Stmt = append(.Stmt, )
	return 
}

func ( *FileSyntax) ( *Line,  ...string) {
	if .InBlock {
		 = [1:]
	}
	.Token = 
}

func ( *FileSyntax) ( *Line) {
	.Token = nil
}
Cleanup cleans up the file syntax x after any edit operations. To avoid quadratic behavior, removeLine marks the line as dead by setting line.Token = nil but does not remove it from the slice in which it appears. After edits have all been indicated, calling Cleanup cleans out the dead lines.
func ( *FileSyntax) () {
	 := 0
	for ,  := range .Stmt {
		switch stmt := .(type) {
		case *Line:
			if .Token == nil {
				continue
			}
		case *LineBlock:
			 := 0
			for ,  := range .Line {
				if .Token != nil {
					.Line[] = 
					++
				}
			}
			if  == 0 {
				continue
			}
Collapse block into single line.
				 := &Line{
					Comments: Comments{
						Before: commentsAdd(.Before, .Line[0].Before),
						Suffix: commentsAdd(.Line[0].Suffix, .Suffix),
						After:  commentsAdd(.Line[0].After, .After),
					},
					Token: stringsAdd(.Token, .Line[0].Token),
				}
				.Stmt[] = 
				++
				continue
			}
			.Line = .Line[:]
		}
		.Stmt[] = 
		++
	}
	.Stmt = .Stmt[:]
}

func (,  []Comment) []Comment {
	return append([:len():len()], ...)
}

// stringsAdd returns the concatenation of x and y.
// The three-index slice expression caps x so the append cannot
// overwrite elements sharing x's backing array beyond len(x).
func stringsAdd(x, y []string) []string {
	return append(x[:len(x):len(x)], y...)
}
A CommentBlock represents a top-level block of comments separate from any rule.
type CommentBlock struct {
	Comments
	Start Position
}

func ( *CommentBlock) () (,  Position) {
	return .Start, .Start
}
A Line is a single line of tokens.
type Line struct {
	Comments
	Start   Position
	Token   []string
	InBlock bool
	End     Position
}

func ( *Line) () (,  Position) {
	return .Start, .End
}
A LineBlock is a factored block of lines, like require ( "x" "y" )
type LineBlock struct {
	Comments
	Start  Position
	LParen LParen
	Token  []string
	Line   []*Line
	RParen RParen
}

func ( *LineBlock) () (,  Position) {
	return .Start, .RParen.Pos.add(")")
}
An LParen represents the beginning of a parenthesized line block. It is a place to store suffix comments.
type LParen struct {
	Comments
	Pos Position
}

func ( *LParen) () (,  Position) {
	return .Pos, .Pos.add(")")
}
An RParen represents the end of a parenthesized line block. It is a place to store whole-line (before) comments.
type RParen struct {
	Comments
	Pos Position
}

func ( *RParen) () (,  Position) {
	return .Pos, .Pos.add(")")
}
An input represents a single input file being parsed.
Lexing state.
	filename   string    // name of input file, for errors
	complete   []byte    // entire input
	remaining  []byte    // remaining input
	tokenStart []byte    // token being scanned to end of input
	token      token     // next token to be returned by lex, peek
	pos        Position  // current input position
	comments   []Comment // accumulated comments
Parser state.
	file        *FileSyntax // returned top-level syntax tree
	parseErrors ErrorList   // errors encountered during parsing
Comment assignment state.
	pre  []Expr // all expressions, in preorder traversal
	post []Expr // all expressions, in postorder traversal
}

func ( string,  []byte) *input {
	return &input{
		filename:  ,
		complete:  ,
		remaining: ,
		pos:       Position{Line: 1, LineRune: 1, Byte: 0},
	}
}
parse parses the input file.
The parser panics for both routine errors like syntax errors and for programmer bugs like array index errors. Turn both into error returns. Catching bug panics is especially important when processing many files.
	 := newInput(, )
	defer func() {
		if  := recover();  != nil &&  != &.parseErrors {
			.parseErrors = append(.parseErrors, Error{
				Filename: .filename,
				Pos:      .pos,
				Err:      fmt.Errorf("internal error: %v", ),
			})
		}
		if  == nil && len(.parseErrors) > 0 {
			 = .parseErrors
		}
	}()
Prime the lexer by reading in the first token. It will be available in the next peek() or lex() call.
	.readToken()
Invoke the parser.
	.parseFile()
	if len(.parseErrors) > 0 {
		return nil, .parseErrors
	}
	.file.Name = .filename
Assign comments to nearby syntax.
	.assignComments()

	return .file, nil
}
Error is called to report an error. Error does not return: it panics.
func ( *input) ( string) {
	.parseErrors = append(.parseErrors, Error{
		Filename: .filename,
		Pos:      .pos,
		Err:      errors.New(),
	})
	panic(&.parseErrors)
}
eof reports whether the input has reached end of file.
func ( *input) () bool {
	return len(.remaining) == 0
}
peekRune returns the next rune in the input without consuming it.
func ( *input) () int {
	if len(.remaining) == 0 {
		return 0
	}
	,  := utf8.DecodeRune(.remaining)
	return int()
}
peekPrefix reports whether the remaining input begins with the given prefix.
This is like bytes.HasPrefix(in.remaining, []byte(prefix)) but without the allocation of the []byte copy of prefix.
	for  := 0;  < len(); ++ {
		if  >= len(.remaining) || .remaining[] != [] {
			return false
		}
	}
	return true
}
readRune consumes and returns the next rune in the input.
func ( *input) () int {
	if len(.remaining) == 0 {
		.Error("internal lexer error: readRune at EOF")
	}
	,  := utf8.DecodeRune(.remaining)
	.remaining = .remaining[:]
	if  == '\n' {
		.pos.Line++
		.pos.LineRune = 1
	} else {
		.pos.LineRune++
	}
	.pos.Byte += 
	return int()
}

// A token is a single lexed token: its kind, source span, and text.
type token struct {
	kind   tokenKind
	pos    Position
	endPos Position
	text   string
}

// A tokenKind identifies the kind of a lexed token. Named kinds use
// negative values so they cannot collide with single-character
// punctuation tokens, which are represented by their ASCII codes.
type tokenKind int

const (
	_EOF tokenKind = -(iota + 1)
	_EOLCOMMENT
	_IDENT
	_STRING
	_COMMENT

	// newlines and punctuation tokens are allowed as ASCII codes.
)

// isComment reports whether the token is a comment (whole-line or suffix).
func (k tokenKind) isComment() bool {
	return k == _COMMENT || k == _EOLCOMMENT
}

// isEOL returns whether a token terminates a line.
func (k tokenKind) isEOL() bool {
	return k == _EOF || k == _EOLCOMMENT || k == '\n'
}
startToken marks the beginning of the next input token. It must be followed by a call to endToken, once the token's text has been consumed using readRune.
func ( *input) () {
	.tokenStart = .remaining
	.token.text = ""
	.token.pos = .pos
}
endToken marks the end of an input token. It records the actual token string in tok.text. A single trailing newline (LF or CRLF) will be removed from comment tokens.
func ( *input) ( tokenKind) {
	.token.kind = 
	 := string(.tokenStart[:len(.tokenStart)-len(.remaining)])
	if .isComment() {
		if strings.HasSuffix(, "\r\n") {
			 = [:len()-2]
		} else {
			 = strings.TrimSuffix(, "\n")
		}
	}
	.token.text = 
	.token.endPos = .pos
}
peek returns the kind of the the next token returned by lex.
func ( *input) () tokenKind {
	return .token.kind
}
lex is called from the parser to obtain the next input token.
func ( *input) () token {
	 := .token
	.readToken()
	return 
}
readToken lexes the next token from the text and stores it in in.token.
Skip past spaces, stopping at non-space or EOF.
	for !.eof() {
		 := .peekRune()
		if  == ' ' ||  == '\t' ||  == '\r' {
			.readRune()
			continue
		}
Comment runs to end of line.
		if .peekPrefix("//") {
			.startToken()
Is this comment the only thing on its line? Find the last \n before this // and see if it's all spaces from there to here.
			 := bytes.LastIndex(.complete[:.pos.Byte], []byte("\n"))
			 := len(bytes.TrimSpace(.complete[+1:.pos.Byte])) > 0
			.readRune()
			.readRune()
Consume comment.
			for len(.remaining) > 0 && .readRune() != '\n' {
			}
If we are at top level (not in a statement), hand the comment to the parser as a _COMMENT token. The grammar is written to handle top-level comments itself.
			if ! {
				.endToken(_COMMENT)
				return
			}
Otherwise, save comment for later attachment to syntax tree.
			.endToken(_EOLCOMMENT)
			.comments = append(.comments, Comment{.token.pos, .token.text, })
			return
		}

		if .peekPrefix("/*") {
			.Error("mod files must use // comments (not /* */ comments)")
		}
Found non-space non-comment.
		break
	}
Found the beginning of the next token.
	.startToken()
End of file.
	if .eof() {
		.endToken(_EOF)
		return
	}
Punctuation tokens.
	switch  := .peekRune();  {
	case '\n', '(', ')', '[', ']', '{', '}', ',':
		.readRune()
		.endToken(tokenKind())
		return

	case '"', '`': // quoted string
		 := 
		.readRune()
		for {
			if .eof() {
				.pos = .token.pos
				.Error("unexpected EOF in string")
			}
			if .peekRune() == '\n' {
				.Error("unexpected newline in string")
			}
			 := .readRune()
			if  ==  {
				break
			}
			if  == '\\' &&  != '`' {
				if .eof() {
					.pos = .token.pos
					.Error("unexpected EOF in string")
				}
				.readRune()
			}
		}
		.endToken(_STRING)
		return
	}
Checked all punctuation. Must be identifier token.
	if  := .peekRune(); !isIdent() {
		.Error(fmt.Sprintf("unexpected input character %#q", ))
	}
Scan over identifier.
	for isIdent(.peekRune()) {
		if .peekPrefix("//") {
			break
		}
		if .peekPrefix("/*") {
			.Error("mod files must use // comments (not /* */ comments)")
		}
		.readRune()
	}
	.endToken(_IDENT)
}
// isIdent reports whether c is an identifier rune.
// We treat most printable runes as identifier runes, except for a handful of
// ASCII punctuation characters.
func isIdent(c int) bool {
	switch r := rune(c); r {
	case ' ', '(', ')', '[', ']', '{', '}', ',':
		return false
	default:
		return !unicode.IsSpace(r) && unicode.IsPrint(r)
	}
}
Comment assignment. We build two lists of all subexpressions, preorder and postorder. The preorder list is ordered by start location, with outer expressions first. The postorder list is ordered by end location, with outer expressions last. We use the preorder list to assign each whole-line comment to the syntax immediately following it, and we use the postorder list to assign each end-of-line comment to the syntax immediately preceding it.
order walks the expression adding it and its subexpressions to the preorder and postorder lists.
func ( *input) ( Expr) {
	if  != nil {
		.pre = append(.pre, )
	}
	switch x := .(type) {
	default:
		panic(fmt.Errorf("order: unexpected type %T", ))
nothing
nothing
nothing
nothing
	case *FileSyntax:
		for ,  := range .Stmt {
			.()
		}
	case *LineBlock:
		.(&.LParen)
		for ,  := range .Line {
			.()
		}
		.(&.RParen)
	}
	if  != nil {
		.post = append(.post, )
	}
}
assignComments attaches comments to nearby syntax.
func ( *input) () {
	const  = false
Generate preorder and postorder lists.
	.order(.file)
Split into whole-line comments and suffix comments.
	var ,  []Comment
	for ,  := range .comments {
		if .Suffix {
			 = append(, )
		} else {
			 = append(, )
		}
	}

	if  {
		for ,  := range  {
			fmt.Fprintf(os.Stderr, "LINE %q :%d:%d #%d\n", .Token, .Start.Line, .Start.LineRune, .Start.Byte)
		}
	}
Assign line comments to syntax immediately following.
	for ,  := range .pre {
		,  := .Span()
		if  {
			fmt.Fprintf(os.Stderr, "pre %T :%d:%d #%d\n", , .Line, .LineRune, .Byte)
		}
		 := .Comment()
		for len() > 0 && .Byte >= [0].Start.Byte {
			if  {
				fmt.Fprintf(os.Stderr, "ASSIGN LINE %q #%d\n", [0].Token, [0].Start.Byte)
			}
			.Before = append(.Before, [0])
			 = [1:]
		}
	}
Remaining line comments go at end of file.
	.file.After = append(.file.After, ...)

	if  {
		for ,  := range  {
			fmt.Fprintf(os.Stderr, "SUFFIX %q :%d:%d #%d\n", .Token, .Start.Line, .Start.LineRune, .Start.Byte)
		}
	}
Assign suffix comments to syntax immediately before.
	for  := len(.post) - 1;  >= 0; -- {
		 := .post[]

		,  := .Span()
		if  {
			fmt.Fprintf(os.Stderr, "post %T :%d:%d #%d :%d:%d #%d\n", , .Line, .LineRune, .Byte, .Line, .LineRune, .Byte)
		}
Do not assign suffix comments to end of line block or whole file. Instead assign them to the last element inside.
		switch .(type) {
		case *FileSyntax:
			continue
		}
Do not assign suffix comments to something that starts on an earlier line, so that in x ( y z ) // comment we assign the comment to z and not to x ( ... ).
		if .Line != .Line {
			continue
		}
		 := .Comment()
		for len() > 0 && .Byte <= [len()-1].Start.Byte {
			if  {
				fmt.Fprintf(os.Stderr, "ASSIGN SUFFIX %q #%d\n", [len()-1].Token, [len()-1].Start.Byte)
			}
			.Suffix = append(.Suffix, [len()-1])
			 = [:len()-1]
		}
	}
We assigned suffix comments in reverse. If multiple suffix comments were appended to the same expression node, they are now in reverse. Fix that.
	for ,  := range .post {
		reverseComments(.Comment().Suffix)
	}
Remaining suffix comments go at beginning of file.
	.file.Before = append(.file.Before, ...)
}
reverseComments reverses the []Comment list.
func ( []Comment) {
	for ,  := 0, len()-1;  < ; ,  = +1, -1 {
		[], [] = [], []
	}
}

func ( *input) () {
	.file = new(FileSyntax)
	var  *CommentBlock
	for {
		switch .peek() {
		case '\n':
			.lex()
			if  != nil {
				.file.Stmt = append(.file.Stmt, )
				 = nil
			}
		case _COMMENT:
			 := .lex()
			if  == nil {
				 = &CommentBlock{Start: .pos}
			}
			 := .Comment()
			.Before = append(.Before, Comment{Start: .pos, Token: .text})
		case _EOF:
			if  != nil {
				.file.Stmt = append(.file.Stmt, )
			}
			return
		default:
			.parseStmt()
			if  != nil {
				.file.Stmt[len(.file.Stmt)-1].Comment().Before = .Before
				 = nil
			}
		}
	}
}

func ( *input) () {
	 := .lex()
	 := .pos
	 := .endPos
	 := []string{.text}
	for {
		 := .lex()
		switch {
		case .kind.isEOL():
			.file.Stmt = append(.file.Stmt, &Line{
				Start: ,
				Token: ,
				End:   ,
			})
			return

		case .kind == '(':
Start of block: no more tokens on this line.
				.file.Stmt = append(.file.Stmt, .parseLineBlock(, , ))
				return
			} else if  == ')' {
				 := .lex()
Empty block.
					.lex()
					.file.Stmt = append(.file.Stmt, &LineBlock{
						Start:  ,
						Token:  ,
						LParen: LParen{Pos: .pos},
						RParen: RParen{Pos: .pos},
					})
					return
'( )' in the middle of the line, not a block.
				 = append(, .text, .text)
'(' in the middle of the line, not a block.
				 = append(, .text)
			}

		default:
			 = append(, .text)
			 = .endPos
		}
	}
}

func ( *input) ( Position,  []string,  token) *LineBlock {
	 := &LineBlock{
		Start:  ,
		Token:  ,
		LParen: LParen{Pos: .pos},
	}
	var  []Comment
	for {
		switch .peek() {
Suffix comment, will be attached later by assignComments.
			.lex()
Blank line. Add an empty comment to preserve it.
			.lex()
			if len() == 0 && len(.Line) > 0 || len() > 0 && [len()-1].Token != "" {
				 = append(, Comment{})
			}
		case _COMMENT:
			 := .lex()
			 = append(, Comment{Start: .pos, Token: .text})
		case _EOF:
			.Error(fmt.Sprintf("syntax error (unterminated block started at %s:%d:%d)", .filename, .Start.Line, .Start.LineRune))
		case ')':
			 := .lex()
			.RParen.Before = 
			.RParen.Pos = .pos
			if !.peek().isEOL() {
				.Error("syntax error (expected newline after closing paren)")
			}
			.lex()
			return 
		default:
			 := .parseLine()
			.Line = append(.Line, )
			.Comment().Before = 
			 = nil
		}
	}
}

func ( *input) () *Line {
	 := .lex()
	if .kind.isEOL() {
		.Error("internal parse error: parseLine at end of line")
	}
	 := .pos
	 := .endPos
	 := []string{.text}
	for {
		 := .lex()
		if .kind.isEOL() {
			return &Line{
				Start:   ,
				Token:   ,
				End:     ,
				InBlock: true,
			}
		}
		 = append(, .text)
		 = .endPos
	}
}

// Byte-string constants used by the lightweight go.mod scanners below.
var (
	slashSlash = []byte("//") // start of a line comment
	moduleStr  = []byte("module")
)
ModulePath returns the module path from the gomod file text. If it cannot find a module path, it returns an empty string. It is tolerant of unrelated problems in the go.mod file.
func ( []byte) string {
	for len() > 0 {
		 := 
		 = nil
		if  := bytes.IndexByte(, '\n');  >= 0 {
			,  = [:], [+1:]
		}
		if  := bytes.Index(, slashSlash);  >= 0 {
			 = [:]
		}
		 = bytes.TrimSpace()
		if !bytes.HasPrefix(, moduleStr) {
			continue
		}
		 = [len(moduleStr):]
		 := len()
		 = bytes.TrimSpace()
		if len() ==  || len() == 0 {
			continue
		}

		if [0] == '"' || [0] == '`' {
			,  := strconv.Unquote(string())
			if  != nil {
				return "" // malformed quoted string or multiline module path
			}
			return 
		}

		return string()
	}
	return "" // missing module path