// Source file: css_parser.go
// Package: github.com/evanw/esbuild/internal/css_parser
package css_parser
import (
	"fmt"
	"strings"

	"github.com/evanw/esbuild/internal/ast"
	"github.com/evanw/esbuild/internal/compat"
	"github.com/evanw/esbuild/internal/css_ast"
	"github.com/evanw/esbuild/internal/css_lexer"
	"github.com/evanw/esbuild/internal/logger"
)
type parser struct {
log logger.Log
source logger.Source
options Options
tokens []css_lexer.Token
stack []css_lexer.T
index int
end int
prevError logger.Loc
importRecords []ast.ImportRecord
}
type Options struct {
UnsupportedCSSFeatures compat.CSSFeature
MangleSyntax bool
RemoveWhitespace bool
}
func ( logger.Log, logger.Source, Options) css_ast.AST {
:= parser{
log: ,
source: ,
options: ,
tokens: css_lexer.Tokenize(, ),
prevError: logger.Loc{Start: -1},
}
.end = len(.tokens)
:= css_ast.AST{}
.Rules = .parseListOfRules(ruleContext{
isTopLevel: true,
parseSelectors: true,
})
.ImportRecords = .importRecords
.expect(css_lexer.TEndOfFile)
return
}
func ( *parser) () {
if .index < .end {
.index++
}
}
func ( *parser) ( int) css_lexer.Token {
if < .end {
return .tokens[]
}
if .end < len(.tokens) {
return css_lexer.Token{
Kind: css_lexer.TEndOfFile,
Range: logger.Range{Loc: .tokens[.end].Range.Loc},
}
}
return css_lexer.Token{
Kind: css_lexer.TEndOfFile,
Range: logger.Range{Loc: logger.Loc{Start: int32(len(.source.Contents))}},
}
}
func ( *parser) () css_lexer.Token {
return .at(.index)
}
func ( *parser) () css_lexer.Token {
return .at(.index + 1)
}
func ( *parser) () string {
:= .current()
return .source.Contents[.Range.Loc.Start:.Range.End()]
}
func ( *parser) () string {
return .current().DecodedText(.source.Contents)
}
func ( *parser) ( css_lexer.T) bool {
return == .current().Kind
}
func ( *parser) ( css_lexer.T) bool {
if .peek() {
.advance()
return true
}
return false
}
func ( *parser) ( css_lexer.T) bool {
if .eat() {
return true
}
:= .current()
var string
= "Expected \";\""
= .at(.index - 1)
} else {
switch .Kind {
case css_lexer.TEndOfFile, css_lexer.TWhitespace:
= fmt.Sprintf("Expected %s but found %s", .String(), .Kind.String())
.Range.Len = 0
case css_lexer.TBadURL, css_lexer.TBadString:
= fmt.Sprintf("Expected %s but found %s", .String(), .Kind.String())
default:
= fmt.Sprintf("Expected %s but found %q", .String(), .raw())
}
}
if .Range.Loc.Start > .prevError.Start {
.log.AddRangeWarning(&.source, .Range, )
.prevError = .Range.Loc
}
return false
}
func ( *parser) () {
if := .current(); .Range.Loc.Start > .prevError.Start {
var string
switch .Kind {
case css_lexer.TEndOfFile, css_lexer.TWhitespace:
= fmt.Sprintf("Unexpected %s", .Kind.String())
.Range.Len = 0
case css_lexer.TBadURL, css_lexer.TBadString:
= fmt.Sprintf("Unexpected %s", .Kind.String())
default:
= fmt.Sprintf("Unexpected %q", .raw())
}
.log.AddRangeWarning(&.source, .Range, )
.prevError = .Range.Loc
}
}
type ruleContext struct {
isTopLevel bool
parseSelectors bool
}
func ( *parser) ( ruleContext) []css_ast.R {
:= false
:= false
:= []css_ast.R{}
:= []logger.Loc{}
for {
switch .current().Kind {
case css_lexer.TEndOfFile, css_lexer.TCloseBrace:
return
case css_lexer.TWhitespace:
.advance()
continue
case css_lexer.TAtKeyword:
:= .current().Range
:= .parseAtRule(atRuleContext{})
if .isTopLevel {
switch .(type) {
case *css_ast.RAtCharset:
if ! && len() > 0 {
.log.AddRangeWarningWithNotes(&.source, , "\"@charset\" must be the first rule in the file",
[]logger.MsgData{logger.RangeData(&.source, logger.Range{Loc: [len()-1]},
"This rule cannot come before a \"@charset\" rule")})
= true
}
case *css_ast.RAtImport:
if ! {
:
for , := range {
switch .(type) {
case *css_ast.RAtCharset, *css_ast.RAtImport:
default:
.log.AddRangeWarningWithNotes(&.source, , "All \"@import\" rules must come first",
[]logger.MsgData{logger.RangeData(&.source, logger.Range{Loc: []},
"This rule cannot come before an \"@import\" rule")})
= true
break
}
}
}
}
}
= append(, )
if .isTopLevel {
= append(, .Loc)
}
continue
case css_lexer.TCDO, css_lexer.TCDC:
if .isTopLevel {
.advance()
continue
}
}
if .isTopLevel {
= append(, .current().Range.Loc)
}
if .parseSelectors {
= append(, .parseSelectorRule())
} else {
= append(, .parseQualifiedRuleFrom(.index, false /* isAlreadyInvalid */))
}
}
}
func ( *parser) () ( []css_ast.R) {
for {
switch .current().Kind {
case css_lexer.TWhitespace, css_lexer.TSemicolon:
.advance()
case css_lexer.TEndOfFile, css_lexer.TCloseBrace:
.processDeclarations()
return
case css_lexer.TAtKeyword:
= append(, .parseAtRule(atRuleContext{
isDeclarationList: true,
}))
= append(, .parseSelectorRule())
default:
= append(, .parseDeclaration())
}
}
}
func ( *parser) () (string, logger.Range, bool) {
:= .current()
switch .Kind {
case css_lexer.TString:
:= .decoded()
.advance()
return , .Range, true
case css_lexer.TURL:
:= .decoded()
.advance()
return , .Range, true
case css_lexer.TFunction:
if .decoded() == "url" {
.advance()
= .current()
:= .decoded()
if .expect(css_lexer.TString) && .expect(css_lexer.TCloseParen) {
return , .Range, true
}
}
}
return "", logger.Range{}, false
}
func ( *parser) () ( string, logger.Range, bool) {
, , = .parseURLOrString()
if ! {
.expect(css_lexer.TURL)
}
return
}
type atRuleKind uint8
const (
atRuleUnknown atRuleKind = iota
atRuleDeclarations
atRuleInheritContext
atRuleEmpty
)
var specialAtRules = map[string]atRuleKind{
"font-face": atRuleDeclarations,
"page": atRuleDeclarations,
"document": atRuleInheritContext,
"media": atRuleInheritContext,
"scope": atRuleInheritContext,
"supports": atRuleInheritContext,
}
type atRuleContext struct {
isDeclarationList bool
}
:= .decoded()
:= .current().Range
:= specialAtRules[]
.advance()
:= .index
switch {
case "charset":
= atRuleEmpty
.expect(css_lexer.TWhitespace)
if .peek(css_lexer.TString) {
:= .decoded()
if != "UTF-8" {
.log.AddRangeWarning(&.source, .current().Range,
fmt.Sprintf("\"UTF-8\" will be used instead of unsupported charset %q", ))
}
.advance()
.expect(css_lexer.TSemicolon)
return &css_ast.RAtCharset{Encoding: }
}
.expect(css_lexer.TString)
case "import":
= atRuleEmpty
.eat(css_lexer.TWhitespace)
if , , := .expectURLOrString(); {
.eat(css_lexer.TWhitespace)
.expect(css_lexer.TSemicolon)
:= uint32(len(.importRecords))
.importRecords = append(.importRecords, ast.ImportRecord{
Kind: ast.ImportAt,
Path: logger.Path{Text: },
Range: ,
})
return &css_ast.RAtImport{ImportRecordIndex: }
}
case "keyframes", "-webkit-keyframes", "-moz-keyframes", "-ms-keyframes", "-o-keyframes":
.eat(css_lexer.TWhitespace)
var string
if .peek(css_lexer.TIdent) {
= .decoded()
.advance()
break
}
.eat(css_lexer.TWhitespace)
if .expect(css_lexer.TOpenBrace) {
var []css_ast.KeyframeBlock
:
for {
switch .current().Kind {
case css_lexer.TWhitespace:
.advance()
continue
case css_lexer.TCloseBrace, css_lexer.TEndOfFile:
break
case css_lexer.TOpenBrace:
.expect(css_lexer.TPercentage)
.parseComponentValue()
default:
var []string
:
for {
:= .current()
switch .Kind {
case css_lexer.TWhitespace:
.advance()
continue
case css_lexer.TOpenBrace, css_lexer.TEndOfFile:
break
case css_lexer.TIdent, css_lexer.TPercentage:
:= .decoded()
if .Kind == css_lexer.TIdent {
if == "from" {
if .options.MangleSyntax {
= "0%" // "0%" is equivalent to but shorter than "from"
}
} else if != "to" {
.expect(css_lexer.TPercentage)
}
} else if .options.MangleSyntax && == "100%" {
= "to" // "to" is equivalent to but shorter than "100%"
}
= append(, )
.advance()
default:
.expect(css_lexer.TPercentage)
.parseComponentValue()
}
.eat(css_lexer.TWhitespace)
if .Kind != css_lexer.TComma && !.peek(css_lexer.TOpenBrace) {
.expect(css_lexer.TComma)
}
}
if .expect(css_lexer.TOpenBrace) {
:= .parseListOfDeclarations()
.expect(css_lexer.TCloseBrace)
= append(, css_ast.KeyframeBlock{
Selectors: ,
Rules: ,
})
}
}
}
.expect(css_lexer.TCloseBrace)
return &css_ast.RAtKeyframes{
AtToken: ,
Name: ,
Blocks: ,
}
}
if == atRuleUnknown {
.log.AddRangeWarning(&.source, , "\"@namespace\" rules are not supported")
} else {
.log.AddRangeWarning(&.source, , fmt.Sprintf("%q is not a known rule name", "@"+))
}
}
}
:
for {
switch .current().Kind {
case css_lexer.TOpenBrace, css_lexer.TEndOfFile:
break
case css_lexer.TSemicolon, css_lexer.TCloseBrace:
:= .convertTokens(.tokens[:.index])
if != atRuleEmpty && != atRuleUnknown {
.expect(css_lexer.TOpenBrace)
.eat(css_lexer.TSemicolon)
return &css_ast.RUnknownAt{AtToken: , Prelude: }
}
.expect(css_lexer.TSemicolon)
return &css_ast.RUnknownAt{AtToken: , Prelude: }
default:
.parseComponentValue()
}
}
:= .convertTokens(.tokens[:.index])
:= .index
switch {
.expect(css_lexer.TSemicolon)
.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace)
:= .convertTokens(.tokens[:.index])
return &css_ast.RUnknownAt{AtToken: , Prelude: , Block: }
.advance()
:= .parseListOfDeclarations()
.expect(css_lexer.TCloseBrace)
return &css_ast.RKnownAt{AtToken: , Prelude: , Rules: }
.advance()
var []css_ast.R
if .isDeclarationList {
= .parseListOfDeclarations()
} else {
= .parseListOfRules(ruleContext{
parseSelectors: true,
})
}
.expect(css_lexer.TCloseBrace)
return &css_ast.RKnownAt{AtToken: , Prelude: , Rules: }
.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace)
, := .convertTokensHelper(.tokens[:.index], css_lexer.TEndOfFile, convertTokensOpts{allowImports: true})
return &css_ast.RUnknownAt{AtToken: , Prelude: , Block: }
}
}
func ( *parser) ( []css_lexer.Token) []css_ast.Token {
, := .convertTokensHelper(, css_lexer.TEndOfFile, convertTokensOpts{})
return
}
type convertTokensOpts struct {
allowImports bool
verbatimWhitespace bool
}
func ( *parser) ( []css_lexer.Token, css_lexer.T, convertTokensOpts) ([]css_ast.Token, []css_lexer.Token) {
var []css_ast.Token
var css_ast.WhitespaceFlags
:
for len() > 0 {
:= [0]
= [1:]
if .Kind == {
break
}
:= css_ast.Token{
Kind: .Kind,
Text: .DecodedText(.source.Contents),
Whitespace: ,
}
= 0
switch .Kind {
case css_lexer.TWhitespace:
if := len() - 1; >= 0 {
[].Whitespace |= css_ast.WhitespaceAfter
}
= css_ast.WhitespaceBefore
continue
case css_lexer.TNumber:
if .options.MangleSyntax {
if , := mangleNumber(.Text); {
.Text =
}
}
case css_lexer.TPercentage:
if .options.MangleSyntax {
if , := mangleNumber(.PercentValue()); {
.Text = + "%"
}
}
case css_lexer.TDimension:
.UnitOffset = .UnitOffset
if .options.MangleSyntax {
if , := mangleNumber(.DimensionValue()); {
.Text = + .DimensionUnit()
.UnitOffset = uint16(len())
}
}
case css_lexer.TURL:
.ImportRecordIndex = uint32(len(.importRecords))
.importRecords = append(.importRecords, ast.ImportRecord{
Kind: ast.ImportURL,
Path: logger.Path{Text: .Text},
Range: .Range,
IsUnused: !.allowImports,
})
.Text = ""
case css_lexer.TFunction:
var []css_ast.Token
:=
:=
.verbatimWhitespace = true
}
, = .(, css_lexer.TCloseParen, )
.Children = &
if .Text == "url" && len() == 1 && [0].Kind == css_lexer.TString {
.Kind = css_lexer.TURL
.Text = ""
.Children = nil
.ImportRecordIndex = uint32(len(.importRecords))
.importRecords = append(.importRecords, ast.ImportRecord{
Kind: ast.ImportURL,
Path: logger.Path{Text: [0].Text},
Range: [0].Range,
IsUnused: !.allowImports,
})
}
case css_lexer.TOpenParen:
var []css_ast.Token
, = .(, css_lexer.TCloseParen, )
.Children = &
case css_lexer.TOpenBrace:
var []css_ast.Token
, = .(, css_lexer.TCloseBrace, )
if !.verbatimWhitespace && !.options.RemoveWhitespace && len() > 0 {
[0].Whitespace |= css_ast.WhitespaceBefore
[len()-1].Whitespace |= css_ast.WhitespaceAfter
}
.Children = &
case css_lexer.TOpenBracket:
var []css_ast.Token
, = .(, css_lexer.TCloseBracket, )
.Children = &
}
= append(, )
}
if !.verbatimWhitespace {
for := range {
:= &[]
if == 0 {
.Whitespace &= ^css_ast.WhitespaceBefore
}
if +1 == len() {
.Whitespace &= ^css_ast.WhitespaceAfter
}
switch .Kind {
.Whitespace &= ^css_ast.WhitespaceBefore
if > 0 {
[-1].Whitespace &= ^css_ast.WhitespaceAfter
}
if .options.RemoveWhitespace {
.Whitespace &= ^css_ast.WhitespaceAfter
if +1 < len() {
[+1].Whitespace &= ^css_ast.WhitespaceBefore
}
} else {
.Whitespace |= css_ast.WhitespaceAfter
if +1 < len() {
[+1].Whitespace |= css_ast.WhitespaceBefore
}
}
}
}
}
if .verbatimWhitespace && len() == 0 && == css_ast.WhitespaceBefore {
= append(, css_ast.Token{
Kind: css_lexer.TWhitespace,
})
}
return ,
}
func ( string) (string, bool) {
:=
if +1 == len() {
= [:]
if == "" || == "+" || == "-" {
+= "0"
}
if , := .parseSelectorList(); {
:= css_ast.RSelector{Selectors: }
if .expect(css_lexer.TOpenBrace) {
.Rules = .parseListOfDeclarations()
.expect(css_lexer.TCloseBrace)
return &
}
}
return .parseQualifiedRuleFrom(, true /* isAlreadyInvalid */)
}
func ( *parser) ( int, bool) *css_ast.RQualified {
:
for {
switch .current().Kind {
case css_lexer.TOpenBrace, css_lexer.TEndOfFile:
break
if ! {
.expect(css_lexer.TOpenBrace)
}
:= .convertTokens(.tokens[:.index])
.advance()
return &css_ast.RQualified{Prelude: }
default:
.parseComponentValue()
}
}
:= css_ast.RQualified{
Prelude: .convertTokens(.tokens[:.index]),
}
if .eat(css_lexer.TOpenBrace) {
.Rules = .parseListOfDeclarations()
.expect(css_lexer.TCloseBrace)
} else if ! {
.expect(css_lexer.TOpenBrace)
}
return &
}
:= .index
:
for {
switch .current().Kind {
case css_lexer.TEndOfFile, css_lexer.TSemicolon, css_lexer.TCloseBrace:
break
.parseComponentValue()
.eat(css_lexer.TWhitespace)
if && !.peek(css_lexer.TSemicolon) {
.expect(css_lexer.TSemicolon)
}
break
default:
.parseComponentValue()
}
}
if ! {
return &css_ast.RBadDeclaration{
Tokens: .convertTokens(.tokens[:.index]),
}
}
:= .tokens[]
:= .DecodedText(.source.Contents)
:= .tokens[:.index]
:= strings.HasPrefix(, "--")
:= false
:= len() - 1
if >= 0 && [].Kind == css_lexer.TWhitespace {
--
}
if >= 0 && [].Kind == css_lexer.TIdent && strings.EqualFold([].DecodedText(.source.Contents), "important") {
--
if >= 0 && [].Kind == css_lexer.TWhitespace {
--
}
if >= 0 && [].Kind == css_lexer.TDelimExclamation {
= [:]
= true
}
}
, := .convertTokensHelper(, css_lexer.TEndOfFile, convertTokensOpts{
allowImports: true,
verbatimWhitespace: ,
})
if ! && len() > 0 {
if .options.RemoveWhitespace {
[0].Whitespace &= ^css_ast.WhitespaceBefore
} else {
[0].Whitespace |= css_ast.WhitespaceBefore
}
}
return &css_ast.RDeclaration{
Key: css_ast.KnownDeclarations[],
KeyText: ,
KeyRange: .Range,
Value: ,
Important: ,
}
}
func ( *parser) () {
switch .current().Kind {
case css_lexer.TFunction:
.parseBlock(css_lexer.TFunction, css_lexer.TCloseParen)
case css_lexer.TOpenParen:
.parseBlock(css_lexer.TOpenParen, css_lexer.TCloseParen)
case css_lexer.TOpenBrace:
.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace)
case css_lexer.TOpenBracket:
.parseBlock(css_lexer.TOpenBracket, css_lexer.TCloseBracket)
case css_lexer.TEndOfFile:
.unexpected()
default:
.advance()
}
}
func ( *parser) ( css_lexer.T, css_lexer.T) {
if .expect() {
for !.eat() {
if .peek(css_lexer.TEndOfFile) {
.expect()
return
}
.parseComponentValue()
}
}
// (Extraction artifact, kept for provenance: this listing was generated by
// Golds v0.3.2-preview, a Go 101 project by Tapir Liu; GOOS=darwin GOARCH=amd64.)