package css_lexer
import "github.com/evanw/esbuild/internal/css_lexer"
Index ¶
- func IsNameContinue(c rune) bool
- func IsNameStart(c rune) bool
- func WouldStartIdentifierWithoutEscapes(text string) bool
- type Comment
- type T
- type Token
- type TokenFlags
- type TokenizeResult
Functions ¶
func IsNameContinue ¶
func IsNameStart ¶
func WouldStartIdentifierWithoutEscapes ¶
Types ¶
type Comment ¶
type T ¶
type T uint8
const (
	TEndOfFile T = iota
	TAtKeyword
	TBadString
	TBadURL
	TCDC // "-->"
	TCDO // "<!--"
	TCloseBrace
	TCloseBracket
	TCloseParen
	TColon
	TComma
	TDelim
	TDelimAmpersand
	TDelimAsterisk
	TDelimBar
	TDelimCaret
	TDelimDollar
	TDelimDot
	TDelimEquals
	TDelimExclamation
	TDelimGreaterThan
	TDelimMinus
	TDelimPlus
	TDelimSlash
	TDelimTilde
	TDimension
	TFunction
	THash
	TIdent
	TNumber
	TOpenBrace
	TOpenBracket
	TOpenParen
	TPercentage
	TSemicolon
	TString
	TURL
	TWhitespace
)
func (T) IsNumeric ¶
func (T) String ¶
type Token ¶
type Token struct {
	Range      logger.Range // 8 bytes
	UnitOffset uint16       // 2 bytes
	Kind       T            // 1 byte
	Flags      TokenFlags   // 1 byte
}
This token struct is designed to be memory-efficient. It just references a range in the input file instead of directly containing the substring of text since a range takes up less memory than a string.
func (Token) DecodedText ¶
type TokenFlags ¶
type TokenFlags uint8
const (
	IsID TokenFlags = 1 << iota
	DidWarnAboutSingleLineComment
)
type TokenizeResult ¶
type TokenizeResult struct {
	Tokens               []Token
	LegalComments        []Comment
	SourceMapComment     logger.Span
	ApproximateLineCount int32
}
func Tokenize ¶
func Tokenize(log logger.Log, source logger.Source) TokenizeResult
Source Files ¶
- Version
- v0.16.12
- Published
- Dec 28, 2022
- Platform
- windows/amd64
- Imports
- 4 packages
- Last checked
- 3 minutes ago
Tools for package owners.