package shlex
import "github.com/anmitsu/go-shlex"
Package shlex provides simple lexical analysis, like a Unix shell.
Index ¶
- Variables
- func Split(s string, posix bool) ([]string, error)
- type DefaultTokenizer
- func (t *DefaultTokenizer) IsEscape(r rune) bool
- func (t *DefaultTokenizer) IsEscapedQuote(r rune) bool
- func (t *DefaultTokenizer) IsQuote(r rune) bool
- func (t *DefaultTokenizer) IsWhitespace(r rune) bool
- func (t *DefaultTokenizer) IsWord(r rune) bool
- type Lexer
- func NewLexer(r io.Reader, posix, whitespacesplit bool) *Lexer
- func NewLexerString(s string, posix, whitespacesplit bool) *Lexer
- func (l *Lexer) SetTokenizer(t Tokenizer)
- func (l *Lexer) Split() ([]string, error)
- type Tokenizer
Examples ¶
Variables ¶
var (
    ErrNoClosing = errors.New("No closing quotation")
    ErrNoEscaped = errors.New("No escaped character")
)
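A minimal sketch of how these sentinels might surface; that Split returns ErrNoClosing for an unterminated quote is an assumption about the library's behavior, not documented above:

package main

import (
    "errors"
    "fmt"

    "github.com/anmitsu/go-shlex"
)

func main() {
    // An unterminated quote should fail in POSIX mode; the exact
    // sentinel returned (ErrNoClosing here) is an assumption.
    _, err := shlex.Split(`echo "unterminated`, true)
    if errors.Is(err, shlex.ErrNoClosing) {
        fmt.Println("input has an unclosed quotation")
    }
}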
Functions ¶
func Split ¶

func Split(s string, posix bool) ([]string, error)

Split splits a string according to POSIX or non-POSIX rules.

Example ¶

Code:
package main

import (
    "fmt"
    "log"

    "github.com/anmitsu/go-shlex"
)

func main() {
    cmd := `cp -Rdp "file name" 'file name2' dir\ name`

    // Split cmd using POSIX rules.
    words1, err := shlex.Split(cmd, true)
    if err != nil {
        log.Fatal(err)
    }

    // Split cmd using non-POSIX rules.
    words2, err := shlex.Split(cmd, false)
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println("Source command:")
    fmt.Println(cmd)
    fmt.Println()

    fmt.Println("POSIX mode:")
    for _, word := range words1 {
        fmt.Println(word)
    }
    fmt.Println()

    fmt.Println("Non-POSIX mode:")
    for _, word := range words2 {
        fmt.Println(word)
    }
}

Output:
Source command:
cp -Rdp "file name" 'file name2' dir\ name

POSIX mode:
cp
-Rdp
file name
file name2
dir name

Non-POSIX mode:
cp
-Rdp
"file name"
'file name2'
dir\
name
Types ¶
type DefaultTokenizer ¶
type DefaultTokenizer struct{}
DefaultTokenizer implements a simple tokenizer that behaves like a Unix shell.
func (*DefaultTokenizer) IsEscape ¶
func (t *DefaultTokenizer) IsEscape(r rune) bool
func (*DefaultTokenizer) IsEscapedQuote ¶
func (t *DefaultTokenizer) IsEscapedQuote(r rune) bool
func (*DefaultTokenizer) IsQuote ¶
func (t *DefaultTokenizer) IsQuote(r rune) bool
func (*DefaultTokenizer) IsWhitespace ¶
func (t *DefaultTokenizer) IsWhitespace(r rune) bool
func (*DefaultTokenizer) IsWord ¶
func (t *DefaultTokenizer) IsWord(r rune) bool
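A small sketch of calling the classifier methods directly; which runes each method accepts is an assumption about the shell-like defaults:

package main

import (
    "fmt"

    "github.com/anmitsu/go-shlex"
)

func main() {
    t := &shlex.DefaultTokenizer{}
    // Which runes fall into each class is an assumption about
    // the shell-like defaults.
    fmt.Println(t.IsWhitespace(' ')) // presumably true
    fmt.Println(t.IsQuote('\''))     // presumably true
    fmt.Println(t.IsEscape('\\'))    // presumably true
    fmt.Println(t.IsWord('a'))       // presumably true
}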
type Lexer ¶
type Lexer struct {
// contains filtered or unexported fields
}
Lexer represents a lexical analyzer.
func NewLexer ¶
func NewLexer(r io.Reader, posix, whitespacesplit bool) *Lexer

NewLexer creates a new Lexer reading from an io.Reader. The Lexer uses a DefaultTokenizer and applies the given posix and whitespacesplit rules.
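A minimal usage sketch, with a strings.Reader standing in for any io.Reader source; the exact token boundaries shown in the comment are an assumption about the POSIX defaults:

package main

import (
    "fmt"
    "log"
    "strings"

    "github.com/anmitsu/go-shlex"
)

func main() {
    r := strings.NewReader(`tar -czf "backup 2020.tgz" ./src`)
    l := shlex.NewLexer(r, true, false) // POSIX rules, whitespacesplit off
    words, err := l.Split()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(words) // expected (assumption): [tar -czf backup 2020.tgz ./src]
}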
func NewLexerString ¶
func NewLexerString(s string, posix, whitespacesplit bool) *Lexer

NewLexerString creates a new Lexer reading from a string. The Lexer uses a DefaultTokenizer and applies the given posix and whitespacesplit rules.
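The string form works the same way; a brief sketch, with the expected result again an assumption about the POSIX quoting rules:

package main

import (
    "fmt"
    "log"

    "github.com/anmitsu/go-shlex"
)

func main() {
    l := shlex.NewLexerString(`ls -la 'My Documents'`, true, false)
    words, err := l.Split()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(words) // expected (assumption): [ls -la My Documents]
}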
func (*Lexer) SetTokenizer ¶
func (l *Lexer) SetTokenizer(t Tokenizer)

SetTokenizer sets a Tokenizer.
func (*Lexer) Split ¶

func (l *Lexer) Split() ([]string, error)
type Tokenizer ¶
type Tokenizer interface {
    IsWord(rune) bool
    IsWhitespace(rune) bool
    IsQuote(rune) bool
    IsEscape(rune) bool
    IsEscapedQuote(rune) bool
}
Tokenizer is the interface that classifies runes as word characters, whitespace, quotations, escapes, or escaped quotations.
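A hedged sketch of a custom tokenizer installed via SetTokenizer: CommaTokenizer is a hypothetical type that embeds DefaultTokenizer and additionally treats commas as whitespace; the exact split result depends on how the lexer weighs the classifier methods and is an assumption:

package main

import (
    "fmt"
    "log"

    "github.com/anmitsu/go-shlex"
)

// CommaTokenizer is a hypothetical Tokenizer that reuses the shell-like
// defaults but also splits on commas.
type CommaTokenizer struct {
    shlex.DefaultTokenizer
}

func (t *CommaTokenizer) IsWhitespace(r rune) bool {
    return r == ',' || t.DefaultTokenizer.IsWhitespace(r)
}

func main() {
    l := shlex.NewLexerString(`alpha,beta gamma`, true, true)
    l.SetTokenizer(&CommaTokenizer{})
    words, err := l.Split()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(words) // expected (assumption): [alpha beta gamma]
}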