
GB28181: Support GB28181-2016 protocol. v5.0.74 (#3201)

01. Support GB config as StreamCaster (see the config sketch after this list).
02. Support disabling GB by --gb28181=off.
03. Add utests for SIP examples.
04. Wireshark plugin to decode TCP/9000 as rtp.rfc4571.
05. Support MPEGPS program stream codec.
06. Add utest for PS stream codec.
07. Decode MPEGPS packet stream.
08. Carry RTP and PS packet as helper in PS message.
09. Support recovering from error mode.
10. Support processing a pack of PS/TS messages.
11. Add statistics for recovered and dropped messages.
12. Recover from the error position quickly.
13. Define state machine for GB session.
14. Bind context to GB session.
15. Re-invite when media disconnected.
16. Update GitHub actions with GB28181.
17. Support parsing CANDIDATE by env or pip.
18. Support muxing GB28181 to RTMP.
19. Support regression test by srs-bench.
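To make items 01 and 02 concrete, here is a hedged sketch of what a GB28181 StreamCaster configuration might look like. The directive names (`stream_caster`, `caster`, `output`, `sip`) follow SRS's existing StreamCaster convention and are assumptions for illustration, not text from this commit:

```
# Hypothetical sketch only: directive names are assumed, not taken from this commit.
stream_caster {
    enabled     on;
    caster      gb28181;                         # GB28181-2016 protocol handler (item 01).
    output      rtmp://127.0.0.1/live/[stream];  # Mux GB28181 to RTMP (item 18).
    listen      9000;                            # Media port; TCP/9000 carries rtp.rfc4571 (item 04).
    sip {
        enabled on;
        listen  5060;                            # SIP signaling port.
    }
}
```

Item 02's build switch is taken directly from the list above: `./configure --gb28181=off` excludes GB28181 at build time.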
Winlin 2022-10-06 17:40:58 +08:00 committed by GitHub
parent 9c81a0e1bd
commit 5a420ece3b
298 changed files with 43343 additions and 763 deletions

github.com/gobwas/httphead/LICENSE
@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2017 Sergey Kamardin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

github.com/gobwas/httphead/README.md
@@ -0,0 +1,63 @@
# httphead.[go](https://golang.org)
[![GoDoc][godoc-image]][godoc-url]
> Tiny HTTP header value parsing library in Go.
## Overview
This library contains low-level functions for scanning HTTP RFC2616 compatible header value grammars.
## Install
```shell
go get github.com/gobwas/httphead
```
## Example
The example below shows how a multiple-choice HTTP header value can be parsed with this library:
```go
options, ok := httphead.ParseOptions([]byte(`foo;bar=1,baz`), nil)
fmt.Println(options, ok)
// Output: [{foo [bar:1]} {baz []}] true
```
The low-level example below shows how to skip uninteresting keys and select a
specific one:
```go
// The right part of full header line like:
// X-My-Header: key;foo=bar;baz,key;baz
header := []byte(`foo;a=0,foo;a=1,foo;a=2,foo;a=3`)
// We want to search for the key "foo" with an "a" parameter equal to "2".
var (
foo = []byte(`foo`)
a = []byte(`a`)
v = []byte(`2`)
)
var found bool
httphead.ScanOptions(header, func(i int, key, param, value []byte) httphead.Control {
	if !bytes.Equal(key, foo) {
		return httphead.ControlSkip
	}
	if bytes.Equal(param, a) {
		if bytes.Equal(value, v) {
			// Found it!
			found = true
			return httphead.ControlBreak
		}
		return httphead.ControlSkip
	}
	return httphead.ControlContinue
})
```
For more usage examples please see [docs][godoc-url] or package tests.
[godoc-image]: https://godoc.org/github.com/gobwas/httphead?status.svg
[godoc-url]: https://godoc.org/github.com/gobwas/httphead

github.com/gobwas/httphead/cookie.go
@@ -0,0 +1,200 @@
package httphead
import (
"bytes"
)
// ScanCookie scans cookie pairs from data using DefaultCookieScanner.Scan()
// method.
func ScanCookie(data []byte, it func(key, value []byte) bool) bool {
return DefaultCookieScanner.Scan(data, it)
}
// DefaultCookieScanner is a CookieScanner which is used by ScanCookie().
// Note that it is intended to have the same behavior as http.Request.Cookies()
// has.
var DefaultCookieScanner = CookieScanner{}
// CookieScanner contains options for scanning cookie pairs.
// See https://tools.ietf.org/html/rfc6265#section-4.1.1
type CookieScanner struct {
// DisableNameValidation disables name validation of a cookie. If false,
// only RFC2616 "tokens" are accepted.
DisableNameValidation bool
// DisableValueValidation disables value validation of a cookie. If false,
// only RFC6265 "cookie-octet" characters are accepted.
//
// Note that Strict option also affects validation of a value.
//
// If Strict is false, the scanner allows space and comma characters inside
// the value for better compatibility with non-standard cookie
// implementations.
DisableValueValidation bool
// BreakOnPairError sets the scanner to return immediately after the first
// pair syntax validation error.
// If false, the scanner will try to skip invalid pair bytes and continue.
BreakOnPairError bool
// Strict enables strict RFC6265 mode scanning. It affects name and value
// validation, as well as some other rules.
// If false, it is intended to bring the same behavior as
// http.Request.Cookies().
Strict bool
}
// Scan maps data to name and value pairs. Usually data represents value of the
// Cookie header.
func (c CookieScanner) Scan(data []byte, it func(name, value []byte) bool) bool {
lexer := &Scanner{data: data}
const (
statePair = iota
stateBefore
)
state := statePair
for lexer.Buffered() > 0 {
switch state {
case stateBefore:
// Pairs separated by ";" and space, according to the RFC6265:
// cookie-pair *( ";" SP cookie-pair )
//
// Cookie pairs MUST be separated by (";" SP). So our only option
// here is to fail as syntax error.
a, b := lexer.Peek2()
if a != ';' {
return false
}
state = statePair
advance := 1
if b == ' ' {
advance++
} else if c.Strict {
return false
}
lexer.Advance(advance)
case statePair:
if !lexer.FetchUntil(';') {
return false
}
var value []byte
name := lexer.Bytes()
if i := bytes.IndexByte(name, '='); i != -1 {
value = name[i+1:]
name = name[:i]
} else if c.Strict {
if !c.BreakOnPairError {
goto nextPair
}
return false
}
if !c.Strict {
name = trimLeft(name)
}
if !c.DisableNameValidation && !ValidCookieName(name) {
if !c.BreakOnPairError {
goto nextPair
}
return false
}
if !c.Strict {
value = trimRight(value)
}
value = stripQuotes(value)
if !c.DisableValueValidation && !ValidCookieValue(value, c.Strict) {
if !c.BreakOnPairError {
goto nextPair
}
return false
}
if !it(name, value) {
return true
}
nextPair:
state = stateBefore
}
}
return true
}
// ValidCookieValue reports whether given value is a valid RFC6265
// "cookie-octet" bytes.
//
// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
// ; US-ASCII characters excluding CTLs,
// ; whitespace DQUOTE, comma, semicolon,
// ; and backslash
//
// Note that passing strict=false disables errors on space (0x20) and comma
// (0x2C). This could be useful for compatibility with non-compliant
// clients/servers in the real world.
// With strict=false it acts the same as the standard library cookie parser.
func ValidCookieValue(value []byte, strict bool) bool {
if len(value) == 0 {
return true
}
for _, c := range value {
switch c {
case '"', ';', '\\':
return false
case ',', ' ':
if strict {
return false
}
default:
if c <= 0x20 {
return false
}
if c >= 0x7f {
return false
}
}
}
return true
}
// ValidCookieName reports whether the given bytes are valid RFC2616 "token" bytes.
func ValidCookieName(name []byte) bool {
for _, c := range name {
if !OctetTypes[c].IsToken() {
return false
}
}
return true
}
func stripQuotes(bts []byte) []byte {
if last := len(bts) - 1; last > 0 && bts[0] == '"' && bts[last] == '"' {
return bts[1:last]
}
return bts
}
func trimLeft(p []byte) []byte {
var i int
for i < len(p) && OctetTypes[p[i]].IsSpace() {
i++
}
return p[i:]
}
func trimRight(p []byte) []byte {
j := len(p)
for j > 0 && OctetTypes[p[j-1]].IsSpace() {
j--
}
return p[:j]
}
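Not part of the diff: a minimal usage sketch of ScanCookie as defined above, using the module path from go.mod below.

```go
package main

import (
	"fmt"

	"github.com/gobwas/httphead"
)

func main() {
	// Scan a Cookie header value with the default (non-strict) scanner.
	header := []byte(`SID=31d4d96e407aad42; lang=en-US`)
	ok := httphead.ScanCookie(header, func(key, value []byte) bool {
		fmt.Printf("%s = %s\n", key, value)
		return true // return false to stop early
	})
	fmt.Println("well-formed:", ok)
}
```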

github.com/gobwas/httphead/go.mod
@@ -0,0 +1,3 @@
module github.com/gobwas/httphead
go 1.15

github.com/gobwas/httphead/head.go
@@ -0,0 +1,275 @@
package httphead
import (
"bufio"
"bytes"
)
// Version contains protocol major and minor version.
type Version struct {
Major int
Minor int
}
// RequestLine contains parameters parsed from the first request line.
type RequestLine struct {
Method []byte
URI []byte
Version Version
}
// ResponseLine contains parameters parsed from the first response line.
type ResponseLine struct {
Version Version
Status int
Reason []byte
}
// SplitRequestLine splits given slice of bytes into three chunks without
// parsing.
func SplitRequestLine(line []byte) (method, uri, version []byte) {
return split3(line, ' ')
}
// ParseRequestLine parses http request line like "GET / HTTP/1.0".
func ParseRequestLine(line []byte) (r RequestLine, ok bool) {
var i int
for i = 0; i < len(line); i++ {
c := line[i]
if !OctetTypes[c].IsToken() {
if i > 0 && c == ' ' {
break
}
return
}
}
if i == len(line) {
return
}
var proto []byte
r.Method = line[:i]
r.URI, proto = split2(line[i+1:], ' ')
if len(r.URI) == 0 {
return
}
if major, minor, ok := ParseVersion(proto); ok {
r.Version.Major = major
r.Version.Minor = minor
return r, true
}
return r, false
}
// SplitResponseLine splits given slice of bytes into three chunks without
// parsing.
func SplitResponseLine(line []byte) (version, status, reason []byte) {
return split3(line, ' ')
}
// ParseResponseLine parses first response line into ResponseLine struct.
func ParseResponseLine(line []byte) (r ResponseLine, ok bool) {
var (
proto []byte
status []byte
)
proto, status, r.Reason = split3(line, ' ')
if major, minor, ok := ParseVersion(proto); ok {
r.Version.Major = major
r.Version.Minor = minor
} else {
return r, false
}
if n, ok := IntFromASCII(status); ok {
r.Status = n
} else {
return r, false
}
// TODO(gobwas): parse r.Reason here for the TEXT rule:
// TEXT = <any OCTET except CTLs,
// but including LWS>
return r, true
}
var (
httpVersion10 = []byte("HTTP/1.0")
httpVersion11 = []byte("HTTP/1.1")
httpVersionPrefix = []byte("HTTP/")
)
// ParseVersion parses major and minor version of HTTP protocol.
// It returns parsed values and true if parse is ok.
func ParseVersion(bts []byte) (major, minor int, ok bool) {
switch {
case bytes.Equal(bts, httpVersion11):
return 1, 1, true
case bytes.Equal(bts, httpVersion10):
return 1, 0, true
case len(bts) < 8:
return
case !bytes.Equal(bts[:5], httpVersionPrefix):
return
}
bts = bts[5:]
dot := bytes.IndexByte(bts, '.')
if dot == -1 {
return
}
major, ok = IntFromASCII(bts[:dot])
if !ok {
return
}
minor, ok = IntFromASCII(bts[dot+1:])
if !ok {
return
}
return major, minor, true
}
// ReadLine reads a line from br. It reads until '\n' and returns the bytes
// without the trailing '\n' or '\r\n'.
// It returns err if and only if the line does not end in '\n'. Note that the
// bytes read so far are returned in any case of error.
//
// It is much like textproto/Reader.ReadLine(), except that it returns raw
// bytes instead of a string. That is, it avoids copying the bytes read from
// br.
//
// textproto/Reader.ReadLineBytes() also makes a copy of the resulting bytes
// to be safe with future I/O operations on br.
//
// We control the I/O operations on br, so we do not need to make an
// additional copy for safety.
func ReadLine(br *bufio.Reader) ([]byte, error) {
var line []byte
for {
bts, err := br.ReadSlice('\n')
if err == bufio.ErrBufferFull {
// Copy bytes because next read will discard them.
line = append(line, bts...)
continue
}
// Avoid copy of single read.
if line == nil {
line = bts
} else {
line = append(line, bts...)
}
if err != nil {
return line, err
}
// The line is at least 1 byte long;
// otherwise bufio.ReadSlice() would have returned an error.
n := len(line)
// Cut '\n' or '\r\n'.
if n > 1 && line[n-2] == '\r' {
line = line[:n-2]
} else {
line = line[:n-1]
}
return line, nil
}
}
// ParseHeaderLine parses HTTP header as key-value pair. It returns parsed
// values and true if parse is ok.
func ParseHeaderLine(line []byte) (k, v []byte, ok bool) {
colon := bytes.IndexByte(line, ':')
if colon == -1 {
return
}
k = trim(line[:colon])
for _, c := range k {
if !OctetTypes[c].IsToken() {
return nil, nil, false
}
}
v = trim(line[colon+1:])
return k, v, true
}
// IntFromASCII converts an ASCII-encoded decimal numeric value from HTTP
// entities to an integer.
func IntFromASCII(bts []byte) (ret int, ok bool) {
// ASCII numbers all start with the high-order bits 0011.
// If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
// bits and interpret them directly as an integer.
var n int
if n = len(bts); n < 1 {
return 0, false
}
for i := 0; i < n; i++ {
if bts[i]&0xf0 != 0x30 {
return 0, false
}
ret += int(bts[i]&0xf) * pow(10, n-i-1)
}
return ret, true
}
const (
toLower = 'a' - 'A' // for use with OR.
toUpper = ^byte(toLower) // for use with AND.
)
// CanonicalizeHeaderKey is like standard textproto/CanonicalMIMEHeaderKey,
// except that it operates with slice of bytes and modifies it inplace without
// copying.
func CanonicalizeHeaderKey(k []byte) {
upper := true
for i, c := range k {
if upper && 'a' <= c && c <= 'z' {
k[i] &= toUpper
} else if !upper && 'A' <= c && c <= 'Z' {
k[i] |= toLower
}
upper = c == '-'
}
}
// pow for integers implementation.
// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
func pow(a, b int) int {
p := 1
for b > 0 {
if b&1 != 0 {
p *= a
}
b >>= 1
a *= a
}
return p
}
func split3(p []byte, sep byte) (p1, p2, p3 []byte) {
a := bytes.IndexByte(p, sep)
b := bytes.IndexByte(p[a+1:], sep)
if a == -1 || b == -1 {
return p, nil, nil
}
b += a + 1
return p[:a], p[a+1 : b], p[b+1:]
}
func split2(p []byte, sep byte) (p1, p2 []byte) {
i := bytes.IndexByte(p, sep)
if i == -1 {
return p, nil
}
return p[:i], p[i+1:]
}
func trim(p []byte) []byte {
var i, j int
for i = 0; i < len(p) && (p[i] == ' ' || p[i] == '\t'); {
i++
}
for j = len(p); j > i && (p[j-1] == ' ' || p[j-1] == '\t'); {
j--
}
return p[i:j]
}
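Not part of the diff: a short sketch exercising ParseRequestLine and ParseResponseLine from this file.

```go
package main

import (
	"fmt"

	"github.com/gobwas/httphead"
)

func main() {
	// Parse the first line of an HTTP request.
	if r, ok := httphead.ParseRequestLine([]byte("GET /index.html HTTP/1.1")); ok {
		fmt.Printf("method=%s uri=%s version=%d.%d\n",
			r.Method, r.URI, r.Version.Major, r.Version.Minor)
	}
	// Parse the first line of an HTTP response.
	if r, ok := httphead.ParseResponseLine([]byte("HTTP/1.1 200 OK")); ok {
		fmt.Printf("status=%d reason=%s\n", r.Status, r.Reason)
	}
}
```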

github.com/gobwas/httphead/httphead.go
@@ -0,0 +1,331 @@
// Package httphead contains utilities for parsing HTTP and HTTP-grammar
// compatible text protocol headers.
//
// That is, the package's first aim is to make it easy to parse constructions
// described here: https://tools.ietf.org/html/rfc2616#section-2
package httphead
import (
"bytes"
"strings"
)
// ScanTokens parses data in this form:
//
// list = 1#token
//
// It returns false if data is malformed.
func ScanTokens(data []byte, it func([]byte) bool) bool {
lexer := &Scanner{data: data}
var ok bool
for lexer.Next() {
switch lexer.Type() {
case ItemToken:
ok = true
if !it(lexer.Bytes()) {
return true
}
case ItemSeparator:
if !isComma(lexer.Bytes()) {
return false
}
default:
return false
}
}
return ok && !lexer.err
}
// ParseOptions parses all header options and appends them to the given slice
// of Option. It returns a flag reporting whether parsing succeeded (the input
// was well-formed).
//
// Note that the appended options all consist of subslices of data. That is,
// mutation of data will mutate the appended options.
func ParseOptions(data []byte, options []Option) ([]Option, bool) {
var i int
index := -1
return options, ScanOptions(data, func(idx int, name, attr, val []byte) Control {
if idx != index {
index = idx
i = len(options)
options = append(options, Option{Name: name})
}
if attr != nil {
options[i].Parameters.Set(attr, val)
}
return ControlContinue
})
}
// SelectFlag encodes way of options selection.
type SelectFlag byte
// String represents the flag as a string.
func (f SelectFlag) String() string {
var flags [2]string
var n int
if f&SelectCopy != 0 {
flags[n] = "copy"
n++
}
if f&SelectUnique != 0 {
flags[n] = "unique"
n++
}
return "[" + strings.Join(flags[:n], "|") + "]"
}
const (
// SelectCopy causes selector to copy selected option before appending it
// to resulting slice.
// If SelectCopy flag is not passed to selector, then appended options will
// contain sub-slices of the initial data.
SelectCopy SelectFlag = 1 << iota
// SelectUnique causes the selector to append only options that are not yet
// present in the resulting slice. Uniqueness is checked by comparing option names.
SelectUnique
)
// OptionSelector contains configuration for selecting Options from header value.
type OptionSelector struct {
// Check is a filter function applied to every Option that could possibly
// be selected.
// If Check is nil, all options will be selected.
Check func(Option) bool
// Flags contains flags for options selection.
Flags SelectFlag
// Alloc is used to allocate a slice of bytes when the selector is
// configured with the SelectCopy flag. It is called with the number of
// bytes needed for a copy of a single Option.
// If Alloc is nil, make is used.
Alloc func(n int) []byte
}
// Select parses header data and appends the selected options to the given
// slice of Option.
// It also returns a flag reporting whether parsing succeeded (the input was well-formed).
func (s OptionSelector) Select(data []byte, options []Option) ([]Option, bool) {
var current Option
var has bool
index := -1
alloc := s.Alloc
if alloc == nil {
alloc = defaultAlloc
}
check := s.Check
if check == nil {
check = defaultCheck
}
ok := ScanOptions(data, func(idx int, name, attr, val []byte) Control {
if idx != index {
if has && check(current) {
if s.Flags&SelectCopy != 0 {
current = current.Copy(alloc(current.Size()))
}
options = append(options, current)
has = false
}
if s.Flags&SelectUnique != 0 {
for i := len(options) - 1; i >= 0; i-- {
if bytes.Equal(options[i].Name, name) {
return ControlSkip
}
}
}
index = idx
current = Option{Name: name}
has = true
}
if attr != nil {
current.Parameters.Set(attr, val)
}
return ControlContinue
})
if has && check(current) {
if s.Flags&SelectCopy != 0 {
current = current.Copy(alloc(current.Size()))
}
options = append(options, current)
}
return options, ok
}
func defaultAlloc(n int) []byte { return make([]byte, n) }
func defaultCheck(Option) bool { return true }
// Control represents operation that scanner should perform.
type Control byte
const (
// ControlContinue causes scanner to continue scan tokens.
ControlContinue Control = iota
// ControlBreak causes scanner to stop scan tokens.
ControlBreak
// ControlSkip causes scanner to skip current entity.
ControlSkip
)
// ScanOptions parses data in this form:
//
// values = 1#value
// value = token *( ";" param )
// param = token [ "=" (token | quoted-string) ]
//
// It calls the given callback with the index of the option, the option itself
// and its parameter (attribute and its value, both of which could be nil).
// The index is useful when a header contains multiple choices for the same
// named option.
//
// The given callback should return one of the defined Control* values.
// ControlSkip means that the passed key is not of interest to the caller;
// all parameters of that key will be skipped.
// ControlBreak means that no more keys and parameters should be parsed; that
// is, parsing must stop immediately.
// ControlContinue means that the caller wants to receive the next parameter
// and its value, or the next key.
//
// It returns false if data is malformed.
func ScanOptions(data []byte, it func(index int, option, attribute, value []byte) Control) bool {
lexer := &Scanner{data: data}
var ok bool
var state int
const (
stateKey = iota
stateParamBeforeName
stateParamName
stateParamBeforeValue
stateParamValue
)
var (
index int
key, param, value []byte
mustCall bool
)
for lexer.Next() {
var (
call bool
growIndex int
)
t := lexer.Type()
v := lexer.Bytes()
switch t {
case ItemToken:
switch state {
case stateKey, stateParamBeforeName:
key = v
state = stateParamBeforeName
mustCall = true
case stateParamName:
param = v
state = stateParamBeforeValue
mustCall = true
case stateParamValue:
value = v
state = stateParamBeforeName
call = true
default:
return false
}
case ItemString:
if state != stateParamValue {
return false
}
value = v
state = stateParamBeforeName
call = true
case ItemSeparator:
switch {
case isComma(v) && state == stateKey:
// Nothing to do.
case isComma(v) && state == stateParamBeforeName:
state = stateKey
// Make call only if we have not called this key yet.
call = mustCall
if !call {
// If we have already called callback with the key
// that just ended.
index++
} else {
// Else grow the index after calling callback.
growIndex = 1
}
case isComma(v) && state == stateParamBeforeValue:
state = stateKey
growIndex = 1
call = true
case isSemicolon(v) && state == stateParamBeforeName:
state = stateParamName
case isSemicolon(v) && state == stateParamBeforeValue:
state = stateParamName
call = true
case isEquality(v) && state == stateParamBeforeValue:
state = stateParamValue
default:
return false
}
default:
return false
}
if call {
switch it(index, key, param, value) {
case ControlBreak:
// The caller wants to stop parsing parameters.
return true
case ControlSkip:
// The caller wants to skip the current param.
state = stateKey
lexer.SkipEscaped(',')
case ControlContinue:
// The caller is interested in the rest of the parameters.
// Nothing to do.
default:
panic("unexpected control value")
}
ok = true
param = nil
value = nil
mustCall = false
index += growIndex
}
}
if mustCall {
ok = true
it(index, key, param, value)
}
return ok && !lexer.err
}
func isComma(b []byte) bool {
return len(b) == 1 && b[0] == ','
}
func isSemicolon(b []byte) bool {
return len(b) == 1 && b[0] == ';'
}
func isEquality(b []byte) bool {
return len(b) == 1 && b[0] == '='
}
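Not part of the diff: a sketch of OptionSelector.Select with the flags defined above. SelectUnique drops the duplicate key, and SelectCopy detaches the results from the input buffer.

```go
package main

import (
	"fmt"

	"github.com/gobwas/httphead"
)

func main() {
	sel := httphead.OptionSelector{
		Flags: httphead.SelectCopy | httphead.SelectUnique,
	}
	header := []byte(`gzip;q=1.0, br;q=0.8, gzip`)
	opts, ok := sel.Select(header, nil)
	fmt.Println(opts, ok) // the second "gzip" is dropped
}
```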

github.com/gobwas/httphead/lexer.go
@@ -0,0 +1,360 @@
package httphead
import (
"bytes"
)
// ItemType encodes type of the lexing token.
type ItemType int
const (
// ItemUndef reports that token is undefined.
ItemUndef ItemType = iota
// ItemToken reports that token is RFC2616 token.
ItemToken
// ItemSeparator reports that token is RFC2616 separator.
ItemSeparator
// ItemString reports that token is RFC2616 quoted string.
ItemString
// ItemComment reports that token is RFC2616 comment.
ItemComment
// ItemOctet reports that token is octet slice.
ItemOctet
)
// Scanner represents header tokens scanner.
// See https://tools.ietf.org/html/rfc2616#section-2
type Scanner struct {
data []byte
pos int
itemType ItemType
itemBytes []byte
err bool
}
// NewScanner creates new RFC2616 data scanner.
func NewScanner(data []byte) *Scanner {
return &Scanner{data: data}
}
// Next scans for next token. It returns true on successful scanning, and false
// on error or EOF.
func (l *Scanner) Next() bool {
c, ok := l.nextChar()
if !ok {
return false
}
switch c {
case '"': // quoted-string;
return l.fetchQuotedString()
case '(': // comment;
return l.fetchComment()
case '\\', ')': // unexpected chars;
l.err = true
return false
default:
return l.fetchToken()
}
}
// FetchUntil fetches ItemOctet from the current scanner position to the first
// occurrence of c, or to the end of the underlying data.
func (l *Scanner) FetchUntil(c byte) bool {
l.resetItem()
if l.pos == len(l.data) {
return false
}
return l.fetchOctet(c)
}
// Peek reads byte at current position without advancing it. On end of data it
// returns 0.
func (l *Scanner) Peek() byte {
if l.pos == len(l.data) {
return 0
}
return l.data[l.pos]
}
// Peek2 reads the first two bytes at the current position without advancing it.
// If there is not enough data, it returns 0 for the missing bytes.
func (l *Scanner) Peek2() (a, b byte) {
if l.pos == len(l.data) {
return 0, 0
}
if l.pos+1 == len(l.data) {
return l.data[l.pos], 0
}
return l.data[l.pos], l.data[l.pos+1]
}
// Buffered reports how many bytes are left to scan.
func (l *Scanner) Buffered() int {
return len(l.data) - l.pos
}
// Advance moves the current position index forward by n bytes. It returns
// true on a successful move.
func (l *Scanner) Advance(n int) bool {
l.pos += n
if l.pos > len(l.data) {
l.pos = len(l.data)
return false
}
return true
}
// Skip skips all bytes until the first occurrence of c.
func (l *Scanner) Skip(c byte) {
if l.err {
return
}
// Reset scanner state.
l.resetItem()
if i := bytes.IndexByte(l.data[l.pos:], c); i == -1 {
// Reached the end of data.
l.pos = len(l.data)
} else {
l.pos += i + 1
}
}
// SkipEscaped skips all bytes until the first occurrence of a non-escaped c.
func (l *Scanner) SkipEscaped(c byte) {
if l.err {
return
}
// Reset scanner state.
l.resetItem()
if i := ScanUntil(l.data[l.pos:], c); i == -1 {
// Reached the end of data.
l.pos = len(l.data)
} else {
l.pos += i + 1
}
}
// Type reports current token type.
func (l *Scanner) Type() ItemType {
return l.itemType
}
// Bytes returns current token bytes.
func (l *Scanner) Bytes() []byte {
return l.itemBytes
}
func (l *Scanner) nextChar() (byte, bool) {
// Reset scanner state.
l.resetItem()
if l.err {
return 0, false
}
l.pos += SkipSpace(l.data[l.pos:])
if l.pos == len(l.data) {
return 0, false
}
return l.data[l.pos], true
}
func (l *Scanner) resetItem() {
l.itemType = ItemUndef
l.itemBytes = nil
}
func (l *Scanner) fetchOctet(c byte) bool {
i := l.pos
if j := bytes.IndexByte(l.data[l.pos:], c); j == -1 {
// Reached the end of data.
l.pos = len(l.data)
} else {
l.pos += j
}
l.itemType = ItemOctet
l.itemBytes = l.data[i:l.pos]
return true
}
func (l *Scanner) fetchToken() bool {
n, t := ScanToken(l.data[l.pos:])
if n == -1 {
l.err = true
return false
}
l.itemType = t
l.itemBytes = l.data[l.pos : l.pos+n]
l.pos += n
return true
}
func (l *Scanner) fetchQuotedString() (ok bool) {
l.pos++
n := ScanUntil(l.data[l.pos:], '"')
if n == -1 {
l.err = true
return false
}
l.itemType = ItemString
l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
l.pos += n + 1
return true
}
func (l *Scanner) fetchComment() (ok bool) {
l.pos++
n := ScanPairGreedy(l.data[l.pos:], '(', ')')
if n == -1 {
l.err = true
return false
}
l.itemType = ItemComment
l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
l.pos += n + 1
return true
}
// ScanUntil scans for the first non-escaped character c in given data.
// It returns the index of the matched c, or -1 if c is not found.
func ScanUntil(data []byte, c byte) (n int) {
for {
i := bytes.IndexByte(data[n:], c)
if i == -1 {
return -1
}
n += i
if n == 0 || data[n-1] != '\\' {
break
}
n++
}
return
}
// ScanPairGreedy scans for a complete pair of opening and closing chars in a
// greedy manner.
// Note that the first opening byte must not be present in data.
func ScanPairGreedy(data []byte, open, close byte) (n int) {
var m int
opened := 1
for {
i := bytes.IndexByte(data[n:], close)
if i == -1 {
return -1
}
n += i
// If found index is not escaped then it is the end.
if n == 0 || data[n-1] != '\\' {
opened--
}
for m < i {
j := bytes.IndexByte(data[m:i], open)
if j == -1 {
break
}
m += j + 1
opened++
}
if opened == 0 {
break
}
n++
m = n
}
return
}
// RemoveByte returns data without c. If c is not present in data, it returns
// the same slice. Otherwise, it returns a copy of data without c.
func RemoveByte(data []byte, c byte) []byte {
j := bytes.IndexByte(data, c)
if j == -1 {
return data
}
n := len(data) - 1
// The character is present, so allocate a slice with len(data)-1 capacity;
// the resulting bytes can be at most len(data)-1 long.
result := make([]byte, n)
k := copy(result, data[:j])
for i := j + 1; i < n; {
j = bytes.IndexByte(data[i:], c)
if j != -1 {
k += copy(result[k:], data[i:i+j])
i = i + j + 1
} else {
k += copy(result[k:], data[i:])
break
}
}
return result[:k]
}
// SkipSpace skips spaces and LWS sequences from p.
// It returns the number of bytes skipped.
func SkipSpace(p []byte) (n int) {
for len(p) > 0 {
switch {
case len(p) >= 3 &&
p[0] == '\r' &&
p[1] == '\n' &&
OctetTypes[p[2]].IsSpace():
p = p[3:]
n += 3
case OctetTypes[p[0]].IsSpace():
p = p[1:]
n++
default:
return
}
}
return
}
// ScanToken scans for the next token in p. It returns the length of the token
// and its type. It does not trim p.
func ScanToken(p []byte) (n int, t ItemType) {
if len(p) == 0 {
return 0, ItemUndef
}
c := p[0]
switch {
case OctetTypes[c].IsSeparator():
return 1, ItemSeparator
case OctetTypes[c].IsToken():
for n = 1; n < len(p); n++ {
c := p[n]
if !OctetTypes[c].IsToken() {
break
}
}
return n, ItemToken
default:
return -1, ItemUndef
}
}
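Not part of the diff: a sketch driving the low-level Scanner by hand over a header value.

```go
package main

import (
	"fmt"

	"github.com/gobwas/httphead"
)

func main() {
	s := httphead.NewScanner([]byte(`text/html; charset="utf-8"`))
	for s.Next() {
		switch s.Type() {
		case httphead.ItemToken:
			fmt.Printf("token:     %q\n", s.Bytes())
		case httphead.ItemSeparator:
			fmt.Printf("separator: %q\n", s.Bytes())
		case httphead.ItemString:
			fmt.Printf("string:    %q\n", s.Bytes())
		}
	}
}
```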

github.com/gobwas/httphead/octet.go
@@ -0,0 +1,83 @@
package httphead
// OctetType describes character type.
//
// From the "Basic Rules" chapter of RFC2616
// See https://tools.ietf.org/html/rfc2616#section-2.2
//
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// UPALPHA = <any US-ASCII uppercase letter "A".."Z">
// LOALPHA = <any US-ASCII lowercase letter "a".."z">
// ALPHA = UPALPHA | LOALPHA
// DIGIT = <any US-ASCII digit "0".."9">
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
//
// Many HTTP/1.1 header field values consist of words separated by LWS
// or special characters. These special characters MUST be in a quoted
// string to be used within a parameter value (as defined in section
// 3.6).
//
// token = 1*<any CHAR except CTLs or separators>
// separators = "(" | ")" | "<" | ">" | "@"
// | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "="
// | "{" | "}" | SP | HT
type OctetType byte
// IsChar reports whether octet is CHAR.
func (t OctetType) IsChar() bool { return t&octetChar != 0 }
// IsControl reports whether octet is CTL.
func (t OctetType) IsControl() bool { return t&octetControl != 0 }
// IsSeparator reports whether octet is separator.
func (t OctetType) IsSeparator() bool { return t&octetSeparator != 0 }
// IsSpace reports whether octet is space (SP or HT).
func (t OctetType) IsSpace() bool { return t&octetSpace != 0 }
// IsToken reports whether octet is token.
func (t OctetType) IsToken() bool { return t&octetToken != 0 }
const (
octetChar OctetType = 1 << iota
octetControl
octetSpace
octetSeparator
octetToken
)
// OctetTypes is a table of octets.
var OctetTypes [256]OctetType
func init() {
	// Classify all 256 octets, including CTLs (0-31): the control check
	// below covers them, and the '\t' case marks HT as space and separator.
	for c := 0; c < 256; c++ {
var t OctetType
if c <= 127 {
t |= octetChar
}
if 0 <= c && c <= 31 || c == 127 {
t |= octetControl
}
switch c {
case '(', ')', '<', '>', '@', ',', ';', ':', '"', '/', '[', ']', '?', '=', '{', '}', '\\':
t |= octetSeparator
case ' ', '\t':
t |= octetSpace | octetSeparator
}
if t.IsChar() && !t.IsControl() && !t.IsSeparator() && !t.IsSpace() {
t |= octetToken
}
OctetTypes[c] = t
}
}
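Not part of the diff: a sketch querying the OctetTypes table built by init() above.

```go
package main

import (
	"fmt"

	"github.com/gobwas/httphead"
)

func main() {
	for _, c := range []byte{'a', ' ', ';', '"', 0x7f} {
		t := httphead.OctetTypes[c]
		fmt.Printf("%q: token=%v separator=%v space=%v control=%v\n",
			c, t.IsToken(), t.IsSeparator(), t.IsSpace(), t.IsControl())
	}
}
```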

github.com/gobwas/httphead/option.go
@@ -0,0 +1,193 @@
package httphead
import (
"bytes"
"sort"
)
// Option represents a header option.
type Option struct {
Name []byte
Parameters Parameters
}
// Size returns the number of bytes that need to be allocated for use in opt.Copy.
func (opt Option) Size() int {
return len(opt.Name) + opt.Parameters.bytes
}
// Copy copies all underlying []byte slices into p and returns new Option.
// Note that p must be at least of opt.Size() length.
func (opt Option) Copy(p []byte) Option {
n := copy(p, opt.Name)
opt.Name = p[:n]
opt.Parameters, p = opt.Parameters.Copy(p[n:])
return opt
}
// Clone is shorthand for allocating a slice of opt.Size() bytes followed by a
// Copy() call.
func (opt Option) Clone() Option {
return opt.Copy(make([]byte, opt.Size()))
}
// String represents option as a string.
func (opt Option) String() string {
return "{" + string(opt.Name) + " " + opt.Parameters.String() + "}"
}
// NewOption creates named option with given parameters.
func NewOption(name string, params map[string]string) Option {
p := Parameters{}
for k, v := range params {
p.Set([]byte(k), []byte(v))
}
return Option{
Name: []byte(name),
Parameters: p,
}
}
// Equal reports whether option is equal to b.
func (opt Option) Equal(b Option) bool {
if bytes.Equal(opt.Name, b.Name) {
return opt.Parameters.Equal(b.Parameters)
}
return false
}
// Parameters represents option's parameters.
type Parameters struct {
pos int
bytes int
arr [8]pair
dyn []pair
}
// Equal reports whether p is equal to b.
func (p Parameters) Equal(b Parameters) bool {
switch {
case p.dyn == nil && b.dyn == nil:
case p.dyn != nil && b.dyn != nil:
default:
return false
}
ad, bd := p.data(), b.data()
if len(ad) != len(bd) {
return false
}
sort.Sort(pairs(ad))
sort.Sort(pairs(bd))
for i := 0; i < len(ad); i++ {
av, bv := ad[i], bd[i]
if !bytes.Equal(av.key, bv.key) || !bytes.Equal(av.value, bv.value) {
return false
}
}
return true
}
// Size returns the number of bytes needed to copy p.
func (p *Parameters) Size() int {
return p.bytes
}
// Copy copies all underlying []byte slices into dst and returns new
// Parameters.
// Note that dst must be at least of p.Size() length.
func (p *Parameters) Copy(dst []byte) (Parameters, []byte) {
ret := Parameters{
pos: p.pos,
bytes: p.bytes,
}
if p.dyn != nil {
ret.dyn = make([]pair, len(p.dyn))
for i, v := range p.dyn {
ret.dyn[i], dst = v.copy(dst)
}
} else {
for i, p := range p.arr {
ret.arr[i], dst = p.copy(dst)
}
}
return ret, dst
}
// Get returns the value for key and a flag reporting whether such a value exists.
func (p *Parameters) Get(key string) (value []byte, ok bool) {
for _, v := range p.data() {
if string(v.key) == key {
return v.value, true
}
}
return nil, false
}
// Set sets value by key.
func (p *Parameters) Set(key, value []byte) {
p.bytes += len(key) + len(value)
if p.pos < len(p.arr) {
p.arr[p.pos] = pair{key, value}
p.pos++
return
}
if p.dyn == nil {
p.dyn = make([]pair, len(p.arr), len(p.arr)+1)
copy(p.dyn, p.arr[:])
}
p.dyn = append(p.dyn, pair{key, value})
}
// ForEach iterates over parameters key-value pairs and calls cb for each one.
func (p *Parameters) ForEach(cb func(k, v []byte) bool) {
for _, v := range p.data() {
if !cb(v.key, v.value) {
break
}
}
}
// String represents parameters as a string.
func (p *Parameters) String() (ret string) {
ret = "["
for i, v := range p.data() {
if i > 0 {
ret += " "
}
ret += string(v.key) + ":" + string(v.value)
}
return ret + "]"
}
func (p *Parameters) data() []pair {
if p.dyn != nil {
return p.dyn
}
return p.arr[:p.pos]
}
type pair struct {
key, value []byte
}
func (p pair) copy(dst []byte) (pair, []byte) {
n := copy(dst, p.key)
p.key = dst[:n]
m := n + copy(dst[n:], p.value)
p.value = dst[n:m]
dst = dst[m:]
return p, dst
}
type pairs []pair
func (p pairs) Len() int { return len(p) }
func (p pairs) Less(a, b int) bool { return bytes.Compare(p[a].key, p[b].key) == -1 }
func (p pairs) Swap(a, b int) { p[a], p[b] = p[b], p[a] }
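Not part of the diff: a sketch of building an Option, reading a parameter back, and cloning it so it owns its backing bytes.

```go
package main

import (
	"fmt"

	"github.com/gobwas/httphead"
)

func main() {
	opt := httphead.NewOption("permessage-deflate", map[string]string{
		"client_max_window_bits": "10",
	})
	if v, ok := opt.Parameters.Get("client_max_window_bits"); ok {
		fmt.Printf("param: %s\n", v)
	}
	clone := opt.Clone() // deep copy; safe if the source buffer is reused
	fmt.Println("equal after clone:", opt.Equal(clone))
}
```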

github.com/gobwas/httphead/writer.go
@@ -0,0 +1,101 @@
package httphead
import "io"
var (
comma = []byte{','}
equality = []byte{'='}
semicolon = []byte{';'}
quote = []byte{'"'}
escape = []byte{'\\'}
)
// WriteOptions writes the options list to dest.
// It uses the same form as the {Scan,Parse}Options functions:
//   values = 1#value
//   value = token *( ";" param )
//   param = token [ "=" (token | quoted-string) ]
//
// It wraps a value in a quoted-string sequence if it contains any
// non-token characters.
func WriteOptions(dest io.Writer, options []Option) (n int, err error) {
w := writer{w: dest}
for i, opt := range options {
if i > 0 {
w.write(comma)
}
writeTokenSanitized(&w, opt.Name)
for _, p := range opt.Parameters.data() {
w.write(semicolon)
writeTokenSanitized(&w, p.key)
if len(p.value) != 0 {
w.write(equality)
writeTokenSanitized(&w, p.value)
}
}
}
return w.result()
}
// writeTokenSanitized writes a token as is, or as a quoted string if it
// contains non-token characters.
//
// Note that it does not expect LWS sequences in bts, because LWS is used only
// for header field continuation:
// "A CRLF is allowed in the definition of TEXT only as part of a header field
// continuation. It is expected that the folding LWS will be replaced with a
// single SP before interpretation of the TEXT value."
// See https://tools.ietf.org/html/rfc2616#section-2
//
// That is, we sanitize bts for writing, so there can be no header field
// continuation: any CRLF will be escaped like any other control character not
// allowed in TEXT.
func writeTokenSanitized(bw *writer, bts []byte) {
var qt bool
var pos int
for i := 0; i < len(bts); i++ {
c := bts[i]
if !OctetTypes[c].IsToken() && !qt {
qt = true
bw.write(quote)
}
if OctetTypes[c].IsControl() || c == '"' {
if !qt {
qt = true
bw.write(quote)
}
bw.write(bts[pos:i])
bw.write(escape)
bw.write(bts[i : i+1])
pos = i + 1
}
}
if !qt {
bw.write(bts)
} else {
bw.write(bts[pos:])
bw.write(quote)
}
}
type writer struct {
w io.Writer
n int
err error
}
func (w *writer) write(p []byte) {
	if w.err != nil {
		return
	}
	var n int
	n, w.err = w.w.Write(p)
	w.n += n
}
func (w *writer) result() (int, error) {
return w.n, w.err
}
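Not part of the diff: a round-trip sketch serializing options with WriteOptions; the output matches the form that the Scan/Parse functions above accept.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/gobwas/httphead"
)

func main() {
	opts := []httphead.Option{
		httphead.NewOption("foo", map[string]string{"bar": "1"}),
		httphead.NewOption("baz", nil),
	}
	var buf bytes.Buffer
	if _, err := httphead.WriteOptions(&buf, opts); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // foo;bar=1,baz
}
```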