package css_lexer

import "github.com/evanw/esbuild/internal/css_lexer"

Index

Functions

func IsNameContinue

func IsNameContinue(c rune) bool

func IsNameStart

func IsNameStart(c rune) bool

func WouldStartIdentifierWithoutEscapes

func WouldStartIdentifierWithoutEscapes(text string) bool

Types

type Comment

type Comment struct {
	Text            string
	Loc             logger.Loc
	TokenIndexAfter uint32
}

type T

type T uint8
const (
	TEndOfFile T = iota

	TAtKeyword
	TBadString
	TBadURL
	TCDC // "-->"
	TCDO // "<!--"
	TCloseBrace
	TCloseBracket
	TCloseParen
	TColon
	TComma
	TDelim
	TDelimAmpersand
	TDelimAsterisk
	TDelimBar
	TDelimCaret
	TDelimDollar
	TDelimDot
	TDelimEquals
	TDelimExclamation
	TDelimGreaterThan
	TDelimMinus
	TDelimPlus
	TDelimSlash
	TDelimTilde
	TDimension
	TFunction
	THash
	TIdent
	TNumber
	TOpenBrace
	TOpenBracket
	TOpenParen
	TPercentage
	TSemicolon
	TString
	TURL
	TWhitespace
)

func (T) IsNumeric

func (t T) IsNumeric() bool

func (T) String

func (t T) String() string

type Token

type Token struct {
	Range      logger.Range // 8 bytes
	UnitOffset uint16       // 2 bytes
	Kind       T            // 1 byte
	Flags      TokenFlags   // 1 byte
}

This token struct is designed to be memory-efficient: instead of directly containing the substring of text, it references a range in the input file, since a range takes up less memory than a string.

func (Token) DecodedText

func (token Token) DecodedText(contents string) string

type TokenFlags

type TokenFlags uint8
const (
	IsID TokenFlags = 1 << iota
	DidWarnAboutSingleLineComment
)

type TokenizeResult

type TokenizeResult struct {
	Tokens               []Token
	LegalComments        []Comment
	SourceMapComment     logger.Span
	ApproximateLineCount int32
}

func Tokenize

func Tokenize(log logger.Log, source logger.Source) TokenizeResult

Source Files

css_lexer.go

Version
v0.14.41
Published
May 27, 2022
Platform
windows/amd64
Imports
4 packages
Last checked
3 hours ago

Tools for package owners.