Merge branch 'master' of https://github.com/didi/nightingale
commit 7b1ccd956b

@ -54,4 +54,4 @@ _test
/src/modules/transfer/transfer
/src/modules/tsdb/tsdb
tmp/
@ -24,4 +24,8 @@
3.2.0
Affected modules: n9e-agent etc/agent.yml
Changes: the agent now supports metrics collection
Changes: the agent now supports metrics collection; this release serves the commercial edition, so open-source users do not need to upgrade

3.3.0
Affected modules: n9e-rdb n9e-transfer
Changes: hardened security (password complexity checks, improved cookie handling, etc.); M3DB is supported as a storage backend (to try M3, edit the transfer configuration file)
control
@ -1,7 +1,7 @@
#!/bin/bash

# release version
version=3.3.0
version=3.3.0.pre

CWD=$(cd $(dirname $0)/; pwd)
cd $CWD
@ -107,7 +107,7 @@ http {
try_files $uri /layout/index.html;
}

location ~ .*(.htm|.html|manifest.json)$ {
location ~ .*(.htm|.html|.json)$ {
add_header Cache-Control "private, no-store, no-cache, must-revalidate, proxy-revalidate";
}
@ -10,9 +10,8 @@ http:

sso:
enable: false
ssoAddr: "http://10.1.2.3:8071"
# TODO: redirectURL: "http://10.1.2.3:8072/auth-callback"
redirectURL: "http://10.1.2.3:8072/api/rdb/auth/callback"
ssoAddr: "http://{sso-host}"
redirectURL: "http://{rdb-host}/auth-callback"
clientId: ""
clientSecret: ""
apiKey: ""
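For orientation only, this is how the new placeholder style above would be filled in for a concrete deployment; the host names below are hypothetical and are not part of this commit:

```yaml
sso:
  enable: false
  ssoAddr: "http://sso.example.com"                    # replaces {sso-host} (hypothetical)
  redirectURL: "http://rdb.example.com/auth-callback"  # replaces {rdb-host} (hypothetical)
  clientId: ""
  clientSecret: ""
  apiKey: ""
```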
@ -19,6 +19,7 @@ CREATE TABLE `user`
`is_root` tinyint(1) not null,
`leader_id` int unsigned not null default 0,
`leader_name` varchar(32) not null default '',
`create_at` timestamp not null default CURRENT_TIMESTAMP,
PRIMARY KEY (`id`),
UNIQUE KEY (`username`),
UNIQUE KEY (`uuid`)
@ -28,3 +28,6 @@ CREATE TABLE `captcha` (
KEY (`captcha_id`, `answer`),
KEY (`created_at`)
) ENGINE = InnoDB DEFAULT CHARSET = utf8;

alter table user add column create_at timestamp not null default CURRENT_TIMESTAMP;
update user set create_at = '2020-11-14 17:00:08';
@ -1,15 +1,15 @@
package models

import (
"log"
"path"
"time"
"log"
"path"
"time"

"xorm.io/core"
"xorm.io/xorm"
"xorm.io/core"
"xorm.io/xorm"

"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/runner"
"github.com/toolkits/pkg/file"
"github.com/toolkits/pkg/runner"
)

type MySQLConf struct {
@ -28,19 +28,20 @@ const (
)

type User struct {
Id int64 `json:"id"`
UUID string `json:"uuid" xorm:"'uuid'"`
Username string `json:"username"`
Password string `json:"-"`
Dispname string `json:"dispname"`
Phone string `json:"phone"`
Email string `json:"email"`
Im string `json:"im"`
Portrait string `json:"portrait"`
Intro string `json:"intro"`
IsRoot int `json:"is_root"`
LeaderId int64 `json:"leader_id"`
LeaderName string `json:"leader_name"`
Id int64 `json:"id"`
UUID string `json:"uuid" xorm:"'uuid'"`
Username string `json:"username"`
Password string `json:"-"`
Dispname string `json:"dispname"`
Phone string `json:"phone"`
Email string `json:"email"`
Im string `json:"im"`
Portrait string `json:"portrait"`
Intro string `json:"intro"`
IsRoot int `json:"is_root"`
LeaderId int64 `json:"leader_id"`
LeaderName string `json:"leader_name"`
CreateAt time.Time `json:"create_at" xorm:"<-"`
}

func (u *User) CopyLdapAttr(sr *ldap.SearchResult) {
@ -640,3 +641,13 @@ func GetUsersNameByIds(ids string) ([]string, error) {
}
return names, err
}

func UsersGet(where string, args ...interface{}) ([]User, error) {
var objs []User
err := DB["rdb"].Where(where, args...).Find(&objs)
if err != nil {
return nil, err
}

return objs, nil
}
@ -129,6 +129,7 @@ func Config(r *gin.Engine) {
userLogin.GET("/resources/bindings", resourceBindingsGet)
userLogin.GET("/resources/orphan", resourceOrphanGet)

userLogin.POST("/container/sync", containerSyncPost)
}

v1 := r.Group("/v1/rdb").Use(shouldBeService())
@ -1,7 +1,6 @@
package http

import (
"github.com/didi/nightingale/src/models"
"github.com/gin-gonic/gin"
)
@ -38,57 +37,7 @@ func v1ContainersBindPost(c *gin.Context) {
bomb("items empty")
}

for i := 0; i < count; i++ {
items[i].Validate()

node := Node(items[i].NID)
if node.Leaf != 1 {
bomb("node not leaf")
}

res, err := models.ResourceGet("uuid=?", items[i].UUID)
dangerous(err)

if res != nil {
// The resource already exists; this request may only be updating some of its fields
res.Name = items[i].Name
res.Labels = items[i].Labels
res.Extend = items[i].Extend
dangerous(res.Update("name", "labels", "extend"))
} else {
// The resource did not exist before; register it in RDB
res = new(models.Resource)
res.UUID = items[i].UUID
res.Ident = items[i].Ident
res.Name = items[i].Name
res.Labels = items[i].Labels
res.Extend = items[i].Extend
res.Cate = items[i].Cate
res.Tenant = node.Tenant()
dangerous(res.Save())
}

dangerous(node.Bind([]int64{res.Id}))

// Second mount point: inner.${cate}
innerCatePath := "inner." + node.Ident
innerCateNode, err := models.NodeGet("path=?", innerCatePath)
dangerous(err)

if innerCateNode == nil {
innerNode, err := models.NodeGet("path=?", "inner")
dangerous(err)

if innerNode == nil {
bomb("inner node not exists")
}

innerCateNode, err = innerNode.CreateChild(node.Ident, node.Name, "", node.Cate, "system", 1, 1, []int64{})
dangerous(err)
}

dangerous(innerCateNode.Bind([]int64{res.Id}))
}
resourceHttpRegister(count, items)

renderMessage(c, nil)
}
@ -1,6 +1,7 @@
package http

import (
"fmt"
"strings"

"github.com/gin-gonic/gin"
@ -17,6 +18,39 @@ func resourceSearchGet(c *gin.Context) {
renderData(c, list, err)
}

type containerSyncForm struct {
Name string `json:"name" binding:"required"`
Items []v1ContainersRegisterItem `json:"items"`
}

func containerSyncPost(c *gin.Context) {
var sf containerSyncForm
bind(c, &sf)

var (
uuids []string
)

list, err := models.ResourceGets("labels like ?",
fmt.Sprintf("%%,resourceName=%s%%", sf.Name))
dangerous(err)

for _, l := range list {
uuids = append(uuids, l.UUID)
}

dangerous(models.ResourceUnregister(uuids))

count := len(sf.Items)
if count == 0 {
return
}

resourceHttpRegister(count, sf.Items)

renderMessage(c, "")
}

type resourceNotePutForm struct {
Ids []int64 `json:"ids" binding:"required"`
Note string `json:"note"`
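As a rough illustration of the handler added above: the route is registered as userLogin.POST("/container/sync", containerSyncPost), so a request could look like the sketch below. The host and URL prefix are assumptions, not taken from the diff; only the "name" and "items" fields come from containerSyncForm.

```bash
# Hypothetical host and path prefix. With an empty "items" list the handler only
# unregisters resources whose labels match ",resourceName=my-cluster".
curl -X POST \
    http://{rdb-host}/api/rdb/container/sync \
    -d '{
        "name": "my-cluster",
        "items": []
    }'
```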
@ -28,6 +62,62 @@ func (f resourceNotePutForm) Validate() {
}
}

func resourceHttpRegister(count int, items []v1ContainersRegisterItem) {
for i := 0; i < count; i++ {
items[i].Validate()

node := Node(items[i].NID)
if node.Leaf != 1 {
bomb("node not leaf")
}

res, err := models.ResourceGet("uuid=?", items[i].UUID)
dangerous(err)

if res != nil {
// The resource already exists; this request may only be updating some of its fields
res.Name = items[i].Name
res.Labels = items[i].Labels
res.Extend = items[i].Extend
dangerous(res.Update("name", "labels", "extend"))
} else {
// The resource did not exist before; register it in RDB
res = new(models.Resource)
res.UUID = items[i].UUID
res.Ident = items[i].Ident
res.Name = items[i].Name
res.Labels = items[i].Labels
res.Extend = items[i].Extend
res.Cate = items[i].Cate
res.Tenant = node.Tenant()
dangerous(res.Save())
}

dangerous(node.Bind([]int64{res.Id}))

// Second mount point: inner.${cate}
innerCatePath := "inner." + node.Ident
innerCateNode, err := models.NodeGet("path=?", innerCatePath)
dangerous(err)

if innerCateNode == nil {
innerNode, err := models.NodeGet("path=?", "inner")
dangerous(err)

if innerNode == nil {
bomb("inner node not exists")
}

innerCateNode, err = innerNode.CreateChild(node.Ident, node.Name, "", node.Cate, "system", 1, 1, []int64{})
dangerous(err)
}

dangerous(innerCateNode.Bind([]int64{res.Id}))
}

return
}

// Update the note from the orphan-resources page; allowed for the super admin or a tenant admin
func resourceNotePut(c *gin.Context) {
var f resourceNotePutForm
@ -146,4 +146,4 @@ func roleGlobalUsersUnbind(c *gin.Context) {
func v1RoleOperationGets(c *gin.Context) {
objs, err := models.RoleOperationAll()
renderData(c, objs, err)
}
}
@ -25,6 +25,7 @@ const (
METRIC_NAME = "__name__"
SERIES_LIMIT = 1000
DOCS_LIMIT = 100
MAX_PONINTS = 720
)

type M3dbSection struct {
@ -51,6 +52,10 @@ type Client struct {
namespaceID ident.ID
}

func indexStartTime() time.Time {
return time.Now().Add(-time.Hour * 25)
}

func NewClient(cfg M3dbSection) (*Client, error) {
client, err := cfg.Config.NewClient(client.ConfigurationParameters{})
if err != nil {
@ -267,7 +272,6 @@ func (p *Client) queryIndexByFullTags(session client.Session, input dataobj.Inde
ret = dataobj.IndexByFullTagsResp{
Metric: input.Metric,
Tags: []string{},
Step: 10,
DsType: "GAUGE",
}
@ -285,20 +289,22 @@ func (p *Client) queryIndexByFullTags(session client.Session, input dataobj.Inde
}

ret.Endpoints = input.Endpoints
tags := map[string]struct{}{}
for iter.Next() {
log.Printf("iter.next() ")
_, _, tagIter := iter.Current()
resp := xcludeResp(tagIter)
if len(resp.Tags) > 0 {
ret.Tags = append(ret.Tags, resp.Tags[0])
if len(resp.Tags) > 0 && len(resp.Tags[0]) > 0 {
tags[resp.Tags[0]] = struct{}{}
}
}
for k, _ := range tags {
ret.Tags = append(ret.Tags, k)
}
if err := iter.Err(); err != nil {
logger.Errorf("FetchTaggedIDs iter:", err)
}

return ret

}

// GetInstance: && (metric) (endpoint) (&& tags...)
@ -438,12 +444,21 @@ func seriesIterWalk(iter encoding.SeriesIterator) (out *dataobj.TsdbQueryRespons

tagsIter := iter.Tags()
tags := map[string]string{}
var metric, endpoint string

for tagsIter.Next() {
tag := tagsIter.Current()
tags[tag.Name.String()] = tag.Value.String()
k := tag.Name.String()
v := tag.Value.String()
switch k {
case METRIC_NAME:
metric = v
case ENDPOINT_NAME, NID_NAME:
endpoint = v
default:
tags[k] = v
}
}
metric := tags[METRIC_NAME]
endpoint := tags[ENDPOINT_NAME]
counter, err := dataobj.GetCounter(metric, "", tags)

return &dataobj.TsdbQueryResponse{
@ -483,7 +498,7 @@ func (cfg M3dbSection) validateTime(start, end int64, step *int) error {
}

if *step == 0 {
*step = int((end - start) / 720)
*step = int((end - start) / MAX_PONINTS)
}

if *step > cfg.MinStep {
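A quick worked example of the replaced line above (numbers are illustrative only): with step left at 0 and a 6-hour query window, *step = (end - start) / MAX_PONINTS = 21600 / 720 = 30 seconds, so a query is never resolved into more than 720 points.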
@ -134,7 +134,7 @@ func (cfg M3dbSection) queryMetricsOptions(input dataobj.EndpointsRecv) (index.Q
)},
index.AggregationOptions{
QueryOptions: index.QueryOptions{
StartInclusive: time.Time{},
StartInclusive: indexStartTime(),
EndExclusive: time.Now(),
SeriesLimit: cfg.SeriesLimit,
DocsLimit: cfg.DocsLimit,

@ -153,7 +153,7 @@ func (cfg M3dbSection) queryTagPairsOptions(input dataobj.EndpointMetricRecv) (i
return index.Query{idx.NewConjunctionQuery(q1, q2)},
index.AggregationOptions{
QueryOptions: index.QueryOptions{
StartInclusive: time.Time{},
StartInclusive: indexStartTime(),
EndExclusive: time.Now(),
SeriesLimit: cfg.SeriesLimit,
DocsLimit: cfg.DocsLimit,

@ -188,7 +188,7 @@ func (cfg M3dbSection) queryIndexByCludeOptions(input dataobj.CludeRecv) (index.
}

return query, index.QueryOptions{
StartInclusive: time.Time{},
StartInclusive: indexStartTime(),
EndExclusive: time.Now(),
SeriesLimit: cfg.SeriesLimit,
DocsLimit: cfg.DocsLimit,

@ -218,7 +218,7 @@ func (cfg M3dbSection) queryIndexByFullTagsOptions(input dataobj.IndexByFullTags
}

return query, index.QueryOptions{
StartInclusive: time.Time{},
StartInclusive: indexStartTime(),
EndExclusive: time.Now(),
SeriesLimit: cfg.SeriesLimit,
DocsLimit: cfg.DocsLimit,
@ -7,7 +7,7 @@
curl -X POST \
http://localhost:8008/api/index/metrics \
-d '{
"endpoints": []
"endpoints": ["10.178.24.116"]
}'
@ -148,5 +148,10 @@ func GetIndexByFullTags(c *gin.Context) {
}

resp := dataSource.QueryIndexByFullTags(recvs)
render.Data(c, resp, nil)
render.Data(c, &listResp{List: resp, Count: len(resp)}, nil)
}

type listResp struct {
List interface{} `json:"list"`
Count int `json:"count"`
}
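For illustration, the full-tags index response is now wrapped by listResp rather than returned as a bare array; ignoring whatever envelope render.Data adds, a result with two index entries would serialize roughly as follows (the list contents are hypothetical placeholders, the field names come from the struct tags above):

```json
{
  "list": ["<index entry 1>", "<index entry 2>"],
  "count": 2
}
```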
@ -0,0 +1,5 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test
@ -0,0 +1,15 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test
@ -0,0 +1,3 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2013 TOML authors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@ -0,0 +1,19 @@
install:
go install ./...

test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder

fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go

tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS

push:
git push origin master
git push github master
@ -0,0 +1,218 @@
## TOML parser and encoder for Go with reflection

TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)

Spec: https://github.com/toml-lang/toml

Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)

Documentation: https://godoc.org/github.com/BurntSushi/toml

Installation:

```bash
go get github.com/BurntSushi/toml
```

Try the toml validator:

```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```

[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)

### Testing

This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.

### Examples

This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.

For the simplest example, consider some TOML file as just a list of keys
and values:

```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```

Which could be defined in Go as:

```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```

And then decoded with:

```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```

You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:

```toml
some_key_NAME = "wat"
```

```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```

### Using the `encoding.TextUnmarshaler` interface

Here's an example that automatically parses duration strings into
`time.Duration` values:

```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"

[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```

Which can be decoded with:

```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}

for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```

And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:

```go
type duration struct {
time.Duration
}

func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```

### More complex usage

Here's an example of how to load the example from the official spec page:

```toml
# This is a TOML document. Boom.

title = "TOML Example"

[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?

[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true

[servers]

# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"

[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"

[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it

# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```

And the corresponding Go types are:

```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}

type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}

type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}

type server struct {
IP string
DC string
}

type clients struct {
Data [][]interface{}
Hosts []string
}
```

Note that a case insensitive match will be tried if an exact match can't be
found.

A working example of the above can be found in `_examples/example.{go,toml}`.
@ -0,0 +1,509 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func e(format string, args ...interface{}) error {
|
||||
return fmt.Errorf("toml: "+format, args...)
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
func Unmarshal(p []byte, v interface{}) error {
|
||||
_, err := Decode(string(p), v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
// When using the various `Decode*` functions, the type `Primitive` may
|
||||
// be given to any value, and its decoding will be delayed.
|
||||
//
|
||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
||||
//
|
||||
// The underlying representation of a `Primitive` value is subject to change.
|
||||
// Do not rely on it.
|
||||
//
|
||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
||||
// the overhead of reflection. They can be useful when you don't know the
|
||||
// exact type of TOML data until run time.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
context Key
|
||||
}
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use MetaData.PrimitiveDecode instead.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]bool)}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
||||
// can *only* be obtained from values filled by the decoder functions,
|
||||
// including this method. (i.e., `v` may contain more `Primitive`
|
||||
// values.)
|
||||
//
|
||||
// Meta data for primitive values is included in the meta data returned by
|
||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
||||
// behind a Primitive will be considered undecoded. Executing this method will
|
||||
// update the undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
||||
// `v`.
|
||||
//
|
||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
||||
// used interchangeably.)
|
||||
//
|
||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
||||
// of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go `time.Time` values.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond
|
||||
// to the obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the
|
||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
||||
// a byte string and given to the value's UnmarshalText method. See the
|
||||
// Unmarshaler example for a demonstration with time duration strings.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
||||
// struct fields that don't match the key name exactly. (See the example.)
|
||||
// A case insensitive match to struct names will be tried if an exact match
|
||||
// can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there
|
||||
// may exist TOML values that cannot be placed into your representation, and
|
||||
// there may be parts of your representation that do not correspond to
|
||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
||||
// and/or Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
||||
// `Decode` will not terminate.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
|
||||
}
|
||||
p, err := parse(data)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
md := MetaData{
|
||||
p.mapping, p.types, p.ordered,
|
||||
make(map[string]bool, len(p.ordered)), nil,
|
||||
}
|
||||
return md, md.unify(p.mapping, indirect(rv))
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at `fpath` and decode it for you.
|
||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// DecodeReader is just like Decode, except it will consume all bytes
|
||||
// from the reader and decode it for you.
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
|
||||
// Special case. Look for a `Primitive` value.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
copy(context, md.context)
|
||||
rv.Set(reflect.ValueOf(Primitive{
|
||||
undecoded: data,
|
||||
context: context,
|
||||
}))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Unmarshaler Interface support.
|
||||
if rv.CanAddr() {
|
||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
}
|
||||
|
||||
// Special case. Handle time.Time values specifically.
|
||||
// TODO: Remove this code when we decide to drop support for Go 1.1.
|
||||
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
|
||||
// interfaces.
|
||||
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
|
||||
return md.unifyDatetime(data, rv)
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
// BUG(burntsushi)
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML
|
||||
// hash or array. In particular, the unmarshaler should only be applied
|
||||
// to primitive TOML values. But at this point, it will be applied to
|
||||
// all kinds of values and produce an incorrect error whenever those values
|
||||
// are hashes or arrays (including arrays of tables).
|
||||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
elem := reflect.New(rv.Type().Elem())
|
||||
err := md.unify(data, reflect.Indirect(elem))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(elem)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
return md.unifyStruct(data, rv)
|
||||
case reflect.Map:
|
||||
return md.unifyMap(data, rv)
|
||||
case reflect.Array:
|
||||
return md.unifyArray(data, rv)
|
||||
case reflect.Slice:
|
||||
return md.unifySlice(data, rv)
|
||||
case reflect.String:
|
||||
return md.unifyString(data, rv)
|
||||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("unsupported type %s", rv.Type())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("unsupported type %s", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return e("type mismatch for %s: expected table but found %T",
|
||||
rv.Type().String(), mapping)
|
||||
}
|
||||
|
||||
for key, datum := range tmap {
|
||||
var f *field
|
||||
fields := cachedTypeFields(rv.Type())
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if ff.name == key {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
if f == nil && strings.EqualFold(ff.name, key) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
subv := rv
|
||||
for _, i := range f.index {
|
||||
subv = indirect(subv.Field(i))
|
||||
}
|
||||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = true
|
||||
md.context = append(md.context, key)
|
||||
if err := md.unify(datum, subv); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
// Bad user! No soup for you!
|
||||
return e("cannot write unexported field %s.%s",
|
||||
rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
}
|
||||
return badtype("map", mapping)
|
||||
}
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
for k, v := range tmap {
|
||||
md.decoded[md.context.add(k).String()] = true
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey.SetString(k)
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
sliceLen := datav.Len()
|
||||
if sliceLen != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d",
|
||||
rv.Len(), sliceLen)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
n := datav.Len()
|
||||
if rv.IsNil() || rv.Cap() < n {
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
|
||||
}
|
||||
rv.SetLen(n)
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||
sliceLen := data.Len()
|
||||
for i := 0; i < sliceLen; i++ {
|
||||
v := data.Index(i).Interface()
|
||||
sliceval := indirect(rv.Index(i))
|
||||
if err := md.unify(v, sliceval); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
||||
if _, ok := data.(time.Time); ok {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
return badtype("time.Time", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
}
|
||||
return badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
rv.SetFloat(num)
|
||||
default:
|
||||
panic("bug")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(int64); ok {
|
||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Int8:
|
||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
||||
return e("value %d is out of range for int8", num)
|
||||
}
|
||||
case reflect.Int16:
|
||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
||||
return e("value %d is out of range for int16", num)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
||||
return e("value %d is out of range for int32", num)
|
||||
}
|
||||
}
|
||||
rv.SetInt(num)
|
||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
||||
unum := uint64(num)
|
||||
switch rv.Kind() {
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Uint8:
|
||||
if num < 0 || unum > math.MaxUint8 {
|
||||
return e("value %d is out of range for uint8", num)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if num < 0 || unum > math.MaxUint16 {
|
||||
return e("value %d is out of range for uint16", num)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if num < 0 || unum > math.MaxUint32 {
|
||||
return e("value %d is out of range for uint32", num)
|
||||
}
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("integer", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
return badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case TextMarshaler:
|
||||
text, err := sdata.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = string(text)
|
||||
case fmt.Stringer:
|
||||
s = sdata.String()
|
||||
case string:
|
||||
s = sdata
|
||||
case bool:
|
||||
s = fmt.Sprintf("%v", sdata)
|
||||
case int64:
|
||||
s = fmt.Sprintf("%d", sdata)
|
||||
case float64:
|
||||
s = fmt.Sprintf("%f", sdata)
|
||||
default:
|
||||
return badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
// indirect returns the value pointed to by a pointer.
|
||||
// Pointers are followed until the value is not a pointer.
|
||||
// New values are allocated for each nil pointer.
|
||||
//
|
||||
// An exception to this rule is if the value satisfies an interface of
|
||||
// interest to us (like encoding.TextUnmarshaler).
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() != reflect.Ptr {
|
||||
if v.CanSet() {
|
||||
pv := v.Addr()
|
||||
if _, ok := pv.Interface().(TextUnmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
return indirect(reflect.Indirect(v))
|
||||
}
|
||||
|
||||
func isUnifiable(rv reflect.Value) bool {
|
||||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func badtype(expected string, data interface{}) error {
|
||||
return e("cannot load TOML value of type %T into a Go %s", data, expected)
|
||||
}
|
|
@ -0,0 +1,121 @@
|
|||
package toml
|
||||
|
||||
import "strings"
|
||||
|
||||
// MetaData allows access to meta information about TOML data that may not
|
||||
// be inferrable via reflection. In particular, whether a key has been defined
|
||||
// and the TOML type of a key.
|
||||
type MetaData struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
keys []Key
|
||||
decoded map[string]bool
|
||||
context Key // Used only during decoding.
|
||||
}
|
||||
|
||||
// IsDefined returns true if the key given exists in the TOML data. The key
|
||||
// should be specified hierarchially. e.g.,
|
||||
//
|
||||
// // access the TOML key 'a.b.c'
|
||||
// IsDefined("a", "b", "c")
|
||||
//
|
||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
||||
func (md *MetaData) IsDefined(key ...string) bool {
|
||||
if len(key) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hash map[string]interface{}
|
||||
var ok bool
|
||||
var hashOrVal interface{} = md.mapping
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
return false
|
||||
}
|
||||
if hashOrVal, ok = hash[k]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Type returns a string representation of the type of the key specified.
|
||||
//
|
||||
// Type will return the empty string if given an empty key or a key that
|
||||
// does not exist. Keys are case sensitive.
|
||||
func (md *MetaData) Type(key ...string) string {
|
||||
fullkey := strings.Join(key, ".")
|
||||
if typ, ok := md.types[fullkey]; ok {
|
||||
return typ.typeString()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
|
||||
// to get values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string {
|
||||
return strings.Join(k, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuotedAll() string {
|
||||
var ss []string
|
||||
for i := range k {
|
||||
ss = append(ss, k.maybeQuoted(i))
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
quote := false
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
quote = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if quote {
|
||||
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
// Keys returns a slice of every key in the TOML data, including key groups.
|
||||
// Each key is itself a slice, where the first element is the top of the
|
||||
// hierarchy and the last is the most specific.
|
||||
//
|
||||
// The list will have the same order as the keys appeared in the TOML data.
|
||||
//
|
||||
// All keys returned are non-empty.
|
||||
func (md *MetaData) Keys() []Key {
|
||||
return md.keys
|
||||
}
|
||||
|
||||
// Undecoded returns all keys that have not been decoded in the order in which
|
||||
// they appear in the original TOML document.
|
||||
//
|
||||
// This includes keys that haven't been decoded because of a Primitive value.
|
||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
||||
//
|
||||
// Also note that decoding into an empty interface will result in no decoding,
|
||||
// and so no keys will be considered decoded.
|
||||
//
|
||||
// In this sense, the Undecoded keys correspond to keys in the TOML document
|
||||
// that do not have a concrete type in your representation.
|
||||
func (md *MetaData) Undecoded() []Key {
|
||||
undecoded := make([]Key, 0, len(md.keys))
|
||||
for _, key := range md.keys {
|
||||
if !md.decoded[key.String()] {
|
||||
undecoded = append(undecoded, key)
|
||||
}
|
||||
}
|
||||
return undecoded
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
Package toml provides facilities for decoding and encoding TOML configuration
|
||||
files via reflection. There is also support for delaying decoding with
|
||||
the Primitive type, and querying the set of keys in a TOML document with the
|
||||
MetaData type.
|
||||
|
||||
The specification implemented: https://github.com/toml-lang/toml
|
||||
|
||||
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
|
||||
whether a file is a valid TOML document. It can also be used to print the
|
||||
type of each key in a TOML document.
|
||||
|
||||
Testing
|
||||
|
||||
There are two important types of tests used for this package. The first is
|
||||
contained inside '*_test.go' files and uses the standard Go unit testing
|
||||
framework. These tests are primarily devoted to holistically testing the
|
||||
decoder and encoder.
|
||||
|
||||
The second type of testing is used to verify the implementation's adherence
|
||||
to the TOML specification. These tests have been factored into their own
|
||||
project: https://github.com/BurntSushi/toml-test
|
||||
|
||||
The reason the tests are in a separate project is so that they can be used by
|
||||
any implementation of TOML. Namely, it is language agnostic.
|
||||
*/
|
||||
package toml
|
|
@ -0,0 +1,568 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type tomlEncodeError struct{ error }
|
||||
|
||||
var (
|
||||
errArrayMixedElementTypes = errors.New(
|
||||
"toml: cannot encode array with mixed element types")
|
||||
errArrayNilElement = errors.New(
|
||||
"toml: cannot encode array with nil element")
|
||||
errNonString = errors.New(
|
||||
"toml: cannot encode a map with non-string key type")
|
||||
errAnonNonStruct = errors.New(
|
||||
"toml: cannot encode an anonymous field that is not a struct")
|
||||
errArrayNoTable = errors.New(
|
||||
"toml: TOML array element cannot contain a table")
|
||||
errNoKey = errors.New(
|
||||
"toml: top-level values must be Go maps or structs")
|
||||
errAnything = errors.New("") // used in testing
|
||||
)
|
||||
|
||||
var quotedReplacer = strings.NewReplacer(
|
||||
"\t", "\\t",
|
||||
"\n", "\\n",
|
||||
"\r", "\\r",
|
||||
"\"", "\\\"",
|
||||
"\\", "\\\\",
|
||||
)
|
||||
|
||||
// Encoder controls the encoding of Go values to a TOML document to some
|
||||
// io.Writer.
|
||||
//
|
||||
// The indentation level can be controlled with the Indent field.
|
||||
type Encoder struct {
|
||||
// A single indentation level. By default it is two spaces.
|
||||
Indent string
|
||||
|
||||
// hasWritten is whether we have written any output to w yet.
|
||||
hasWritten bool
|
||||
w *bufio.Writer
|
||||
}
|
||||
|
||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
||||
// given. By default, a single indentation level is 2 spaces.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
|
||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
||||
// then an error is returned.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same
|
||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
||||
// arbitrary binary data then you will need to use something like base64 since
|
||||
// TOML does not have any binary types.)
|
||||
//
|
||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
||||
// sub-hashes are encoded first.
|
||||
//
|
||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
||||
// deterministic output. More control over this behavior may be provided if
|
||||
// there is demand for it.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation---like map
|
||||
// types with non-string keys---will cause an error to be returned. Similarly
|
||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
||||
// non-struct types and nested slices containing maps or structs.
|
||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
||||
// and so is []map[string][]string.)
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
}
|
||||
|
||||
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if terr, ok := r.(tomlEncodeError); ok {
|
||||
err = terr.error
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
enc.encode(key, rv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we used that.
|
||||
// Basically, this prevents the encoder for handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time, TextMarshaler:
|
||||
enc.keyEqElement(key, rv)
|
||||
return
|
||||
}
|
||||
|
||||
k := rv.Kind()
|
||||
switch k {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
||||
enc.keyEqElement(key, rv)
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
||||
enc.eArrayOfTables(key, rv)
|
||||
} else {
|
||||
enc.keyEqElement(key, rv)
|
||||
}
|
||||
case reflect.Interface:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Map:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.eTable(key, rv)
|
||||
case reflect.Ptr:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Struct:
|
||||
enc.eTable(key, rv)
|
||||
default:
|
||||
panic(e("unsupported type for key '%s': %s", key, k))
|
||||
}
|
||||
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
|
||||
// arrays).
|
||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
switch v := rv.Interface().(type) {
|
||||
case time.Time:
|
||||
// Special case time.Time as a primitive. Has to come before
|
||||
// TextMarshaler below because time.Time implements
|
||||
// encoding.TextMarshaler, but we need to always use UTC.
|
||||
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
|
||||
return
|
||||
case TextMarshaler:
|
||||
// Special case. Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
}
|
||||
return
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
||||
reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
||||
case reflect.Float64:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
default:
|
||||
panic(e("unexpected primitive type: %s", rv.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// By the TOML spec, all floats must have a decimal with at least one
|
||||
// number on either side.
|
||||
func floatAddDecimal(fstr string) string {
|
||||
if !strings.Contains(fstr, ".") {
|
||||
return fstr + ".0"
|
||||
}
|
||||
return fstr
|
||||
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
}
|
||||
enc.wf("]")
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
panicIfInvalidKey(key)
|
||||
enc.newline()
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
enc.eMapOrStruct(key, trv)
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
panicIfInvalidKey(key)
|
||||
if len(key) == 1 {
|
||||
// Output an extra newline between top-level tables.
|
||||
// (The newline isn't written if nothing else has been written though.)
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(key, rv)
|
||||
default:
|
||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
||||
rt := rv.Type()
|
||||
if rt.Key().Kind() != reflect.String {
|
||||
encPanic(errNonString)
|
||||
}
|
||||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string) {
|
||||
sort.Strings(mapKeys)
|
||||
for _, mapKey := range mapKeys {
|
||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
if isNil(mrv) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
enc.encode(key.add(mapKey), mrv)
|
||||
}
|
||||
}
|
||||
writeMapKeys(mapKeysDirect)
|
||||
writeMapKeys(mapKeysSub)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table, then all keys under it will be in that
|
||||
// table (not the one we're writing here).
|
||||
rt := rv.Type()
|
||||
var fieldsDirect, fieldsSub [][]int
|
||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
// skip unexported fields
|
||||
if f.PkgPath != "" && !f.Anonymous {
|
||||
continue
|
||||
}
|
||||
frv := rv.Field(i)
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
// Treat anonymous struct fields with
|
||||
// tag names as though they are not
|
||||
// anonymous, like encoding/json does.
|
||||
if getOptions(f.Tag).name == "" {
|
||||
addFields(t, frv, f.Index)
|
||||
continue
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct &&
|
||||
getOptions(f.Tag).name == "" {
|
||||
if !frv.IsNil() {
|
||||
addFields(t.Elem(), frv.Elem(), f.Index)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Fall through to the normal field encoding logic below
|
||||
// for non-struct anonymous fields.
|
||||
}
|
||||
}
|
||||
|
||||
if typeIsHash(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
var writeFields = func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
sft := rt.FieldByIndex(fieldIndex)
|
||||
sf := rv.FieldByIndex(fieldIndex)
|
||||
if isNil(sf) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
opts := getOptions(sft.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
keyName := sft.Name
|
||||
if opts.name != "" {
|
||||
keyName = opts.name
|
||||
}
|
||||
if opts.omitempty && isEmpty(sf) {
|
||||
continue
|
||||
}
|
||||
if opts.omitzero && isZero(sf) {
|
||||
continue
|
||||
}
|
||||
|
||||
enc.encode(key.add(keyName), sf)
|
||||
}
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
}
|
||||
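A sketch of the anonymous-field handling in eStruct above: an embedded struct without a toml tag is flattened into the parent table, mirroring encoding/json. The Base and Server types are invented for illustration, and the import path assumes the upstream BurntSushi/toml package.

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type Base struct {
	Region string
}

type Server struct {
	Base        // anonymous and untagged, so Region is lifted to the top level
	Addr string
}

func main() {
	s := Server{Base: Base{Region: "north"}, Addr: "10.1.2.3:2080"}
	if err := toml.NewEncoder(os.Stdout).Encode(s); err != nil {
		panic(err)
	}
	// Output:
	//   Region = "north"
	//   Addr = "10.1.2.3:2080"
}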
|
||||
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found. It is also used to
// determine whether the types of array elements are mixed (which is
// forbidden); a nil value is illegal as an array element, and the nil type is
// used to flag that case.
|
||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() {
|
||||
return nil
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return tomlBool
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64:
|
||||
return tomlInteger
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return tomlFloat
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
||||
return tomlArrayHash
|
||||
}
|
||||
return tomlArray
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return tomlTypeOfGo(rv.Elem())
|
||||
case reflect.String:
|
||||
return tomlString
|
||||
case reflect.Map:
|
||||
return tomlHash
|
||||
case reflect.Struct:
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time:
|
||||
return tomlDatetime
|
||||
case TextMarshaler:
|
||||
return tomlString
|
||||
default:
|
||||
return tomlHash
|
||||
}
|
||||
default:
|
||||
panic("unexpected reflect.Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
||||
// slice). This function may also panic if it finds a type that cannot be
|
||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
||||
// nested arrays of tables).
|
||||
func tomlArrayType(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
firstType := tomlTypeOfGo(rv.Index(0))
|
||||
if firstType == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
}
|
||||
|
||||
rvlen := rv.Len()
|
||||
for i := 1; i < rvlen; i++ {
|
||||
elem := rv.Index(i)
|
||||
switch elemType := tomlTypeOfGo(elem); {
|
||||
case elemType == nil:
|
||||
encPanic(errArrayNilElement)
|
||||
case !typeEqual(firstType, elemType):
|
||||
encPanic(errArrayMixedElementTypes)
|
||||
}
|
||||
}
|
||||
// If we have a nested array, then we must make sure that the nested
|
||||
// array contains ONLY primitives.
|
||||
// This checks arbitrarily nested arrays.
|
||||
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
|
||||
nest := tomlArrayType(eindirect(rv.Index(0)))
|
||||
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
|
||||
encPanic(errArrayNoTable)
|
||||
}
|
||||
}
|
||||
return firstType
|
||||
}
|
||||
|
||||
type tagOptions struct {
|
||||
skip bool // "-"
|
||||
name string
|
||||
omitempty bool
|
||||
omitzero bool
|
||||
}
|
||||
|
||||
func getOptions(tag reflect.StructTag) tagOptions {
|
||||
t := tag.Get("toml")
|
||||
if t == "-" {
|
||||
return tagOptions{skip: true}
|
||||
}
|
||||
var opts tagOptions
|
||||
parts := strings.Split(t, ",")
|
||||
opts.name = parts[0]
|
||||
for _, s := range parts[1:] {
|
||||
switch s {
|
||||
case "omitempty":
|
||||
opts.omitempty = true
|
||||
case "omitzero":
|
||||
opts.omitzero = true
|
||||
}
|
||||
}
|
||||
return opts
|
||||
}
|
||||
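The tag options parsed by getOptions map onto encoding as in this sketch. The Conf type and its field names are invented; the import path assumes the upstream BurntSushi/toml package.

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type Conf struct {
	Listen   string   `toml:"listen"`             // renamed key
	Backends []string `toml:"backends,omitempty"` // dropped when empty
	Retries  int      `toml:"retries,omitzero"`   // dropped when zero
	Debug    bool     `toml:"-"`                  // always skipped
	secret   string                               // unexported: never encoded
}

func main() {
	c := Conf{Listen: "0.0.0.0:2080", secret: "x"}
	if err := toml.NewEncoder(os.Stdout).Encode(c); err != nil {
		panic(err)
	}
	// Only `listen = "0.0.0.0:2080"` is written; the other fields fall under
	// the omitempty / omitzero / "-" / unexported rules handled above.
}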
|
||||
func isZero(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return rv.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return rv.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return rv.Float() == 0.0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (enc *Encoder) newline() {
|
||||
if enc.hasWritten {
|
||||
enc.wf("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
panicIfInvalidKey(key)
|
||||
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
|
||||
enc.eElement(val)
|
||||
enc.newline()
|
||||
}
|
||||
|
||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
||||
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
enc.hasWritten = true
|
||||
}
|
||||
|
||||
func (enc *Encoder) indentStr(key Key) string {
|
||||
return strings.Repeat(enc.Indent, len(key)-1)
|
||||
}
|
||||
|
||||
func encPanic(err error) {
|
||||
panic(tomlEncodeError{err})
|
||||
}
|
||||
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return rv.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func panicIfInvalidKey(key Key) {
|
||||
for _, k := range key {
|
||||
if len(k) == 0 {
|
||||
encPanic(e("Key '%s' is not a valid table name. Key names "+
|
||||
"cannot be empty.", key.maybeQuotedAll()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isValidKeyName(s string) bool {
|
||||
return len(s) != 0
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
// +build go1.2
|
||||
|
||||
package toml
|
||||
|
||||
// In order to support Go 1.1, we define our own TextMarshaler and
|
||||
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
|
||||
// standard library interfaces.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
)
|
||||
|
||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextMarshaler encoding.TextMarshaler
|
||||
|
||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
|
||||
// here so that Go 1.1 can be supported.
|
||||
type TextUnmarshaler encoding.TextUnmarshaler
|
|
@ -0,0 +1,18 @@
|
|||
// +build !go1.2
|
||||
|
||||
package toml
|
||||
|
||||
// These interfaces were introduced in Go 1.2, so we add them manually when
|
||||
// compiling for Go 1.1.
|
||||
|
||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextMarshaler interface {
|
||||
MarshalText() (text []byte, err error)
|
||||
}
|
||||
|
||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
|
||||
// here so that Go 1.1 can be supported.
|
||||
type TextUnmarshaler interface {
|
||||
UnmarshalText(text []byte) error
|
||||
}
|
|
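As a sketch of why these aliases exist: any value implementing TextMarshaler is written by the encoder as a TOML string. LogLevel below is an invented type, and the import path assumes the upstream BurntSushi/toml package.

package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

// LogLevel implements encoding.TextMarshaler, so the encoder emits it as a
// quoted string rather than as an integer.
type LogLevel int

func (l LogLevel) MarshalText() ([]byte, error) {
	if l > 0 {
		return []byte("debug"), nil
	}
	return []byte("info"), nil
}

func main() {
	cfg := struct {
		Level LogLevel `toml:"level"`
	}{Level: 1}
	if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		panic(err)
	}
	// Expected output: level = "debug"
}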
@ -0,0 +1,953 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota
|
||||
itemNIL // used in the parser to indicate no type
|
||||
itemEOF
|
||||
itemText
|
||||
itemString
|
||||
itemRawString
|
||||
itemMultilineString
|
||||
itemRawMultilineString
|
||||
itemBool
|
||||
itemInteger
|
||||
itemFloat
|
||||
itemDatetime
|
||||
itemArray // the start of an array
|
||||
itemArrayEnd
|
||||
itemTableStart
|
||||
itemTableEnd
|
||||
itemArrayTableStart
|
||||
itemArrayTableEnd
|
||||
itemKeyStart
|
||||
itemCommentStart
|
||||
itemInlineTableStart
|
||||
itemInlineTableEnd
|
||||
)
|
||||
|
||||
const (
|
||||
eof = 0
|
||||
comma = ','
|
||||
tableStart = '['
|
||||
tableEnd = ']'
|
||||
arrayTableStart = '['
|
||||
arrayTableEnd = ']'
|
||||
tableSep = '.'
|
||||
keySep = '='
|
||||
arrayStart = '['
|
||||
arrayEnd = ']'
|
||||
commentStart = '#'
|
||||
stringStart = '"'
|
||||
stringEnd = '"'
|
||||
rawStringStart = '\''
|
||||
rawStringEnd = '\''
|
||||
inlineTableStart = '{'
|
||||
inlineTableEnd = '}'
|
||||
)
|
||||
|
||||
type stateFn func(lx *lexer) stateFn
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
|
||||
// Allow backing up by up to three runes.
|
||||
// This is necessary because TOML contains 3-rune tokens (""" and ''').
|
||||
prevWidths [3]int
|
||||
nprev int // how many of prevWidths are in use
|
||||
// If we emit an eof, we can still back up, but it is not OK to call
|
||||
// next again.
|
||||
atEOF bool
|
||||
|
||||
// A stack of state functions used to maintain context.
|
||||
// The idea is to reuse parts of the state machine in various places.
|
||||
// For example, values can appear at the top level or within arbitrarily
|
||||
// nested arrays. The last state on the stack is used after a value has
|
||||
// been lexed. Similarly for comments.
|
||||
stack []stateFn
|
||||
}
|
||||
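The state-function design described in the comments above (each state returns the next state, and a stack of states lets value lexing return to whatever context invoked it) is the classic Go lexing pattern. A stripped-down, self-contained sketch of the same idea, unrelated to TOML itself:

package main

import "fmt"

// stateFn returns the next state, or nil when lexing is done.
type stateFn func(*toyLexer) stateFn

type toyLexer struct {
	input string
	pos   int
	out   []string
}

// lexWord collects characters until it hits a space or the end of input.
func lexWord(l *toyLexer) stateFn {
	start := l.pos
	for l.pos < len(l.input) && l.input[l.pos] != ' ' {
		l.pos++
	}
	l.out = append(l.out, l.input[start:l.pos])
	if l.pos >= len(l.input) {
		return nil // EOF: stop the machine
	}
	l.pos++ // skip the space
	return lexWord
}

func main() {
	l := &toyLexer{input: "state functions drive the lexer"}
	// The driver mirrors the nextItem loop in this file: keep calling the
	// current state until it reports that there is nothing left to do.
	for state := stateFn(lexWord); state != nil; {
		state = state(l)
	}
	fmt.Println(l.out) // [state functions drive the lexer]
}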
|
||||
type item struct {
|
||||
typ itemType
|
||||
val string
|
||||
line int
|
||||
}
|
||||
|
||||
func (lx *lexer) nextItem() item {
|
||||
for {
|
||||
select {
|
||||
case item := <-lx.items:
|
||||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input,
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
||||
func (lx *lexer) push(state stateFn) {
|
||||
lx.stack = append(lx.stack, state)
|
||||
}
|
||||
|
||||
func (lx *lexer) pop() stateFn {
|
||||
if len(lx.stack) == 0 {
|
||||
return lx.errorf("BUG in lexer: no states to pop")
|
||||
}
|
||||
last := lx.stack[len(lx.stack)-1]
|
||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (lx *lexer) current() string {
|
||||
return lx.input[lx.start:lx.pos]
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.current(), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) emitTrim(typ itemType) {
|
||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) next() (r rune) {
|
||||
if lx.atEOF {
|
||||
panic("next called after EOF")
|
||||
}
|
||||
if lx.pos >= len(lx.input) {
|
||||
lx.atEOF = true
|
||||
return eof
|
||||
}
|
||||
|
||||
if lx.input[lx.pos] == '\n' {
|
||||
lx.line++
|
||||
}
|
||||
lx.prevWidths[2] = lx.prevWidths[1]
|
||||
lx.prevWidths[1] = lx.prevWidths[0]
|
||||
if lx.nprev < 3 {
|
||||
lx.nprev++
|
||||
}
|
||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
lx.prevWidths[0] = w
|
||||
lx.pos += w
|
||||
return r
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (lx *lexer) ignore() {
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can be called only twice between calls to next.
|
||||
func (lx *lexer) backup() {
|
||||
if lx.atEOF {
|
||||
lx.atEOF = false
|
||||
return
|
||||
}
|
||||
if lx.nprev < 1 {
|
||||
panic("backed up too far")
|
||||
}
|
||||
w := lx.prevWidths[0]
|
||||
lx.prevWidths[0] = lx.prevWidths[1]
|
||||
lx.prevWidths[1] = lx.prevWidths[2]
|
||||
lx.nprev--
|
||||
lx.pos -= w
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
}
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's equal to `valid`.
|
||||
func (lx *lexer) accept(valid rune) bool {
|
||||
if lx.next() == valid {
|
||||
return true
|
||||
}
|
||||
lx.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (lx *lexer) peek() rune {
|
||||
r := lx.next()
|
||||
lx.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// skip ignores all input that matches the given predicate.
|
||||
func (lx *lexer) skip(pred func(rune) bool) {
|
||||
for {
|
||||
r := lx.next()
|
||||
if pred(r) {
|
||||
continue
|
||||
}
|
||||
lx.backup()
|
||||
lx.ignore()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (newlines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
switch r {
|
||||
case commentStart:
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case tableStart:
|
||||
return lexTableStart
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
return lx.errorf("unexpected EOF")
|
||||
}
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point, the only valid item can be a key, so we back up
|
||||
// and let the key lexer do the rest.
|
||||
lx.backup()
|
||||
lx.push(lexTopEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
|
||||
// or a table.) It must see only whitespace, and will turn back to lexTop
|
||||
// upon a newline. If it sees EOF, it will quit the lexer successfully.
|
||||
func lexTopEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentStart:
|
||||
// a comment will read to a newline for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case isWhitespace(r):
|
||||
return lexTopEnd
|
||||
case isNL(r):
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
case r == eof:
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
return lx.errorf("expected a top-level item to end with a newline, "+
|
||||
"comment, or EOF, but got %q instead", r)
|
||||
}
|
||||
|
||||
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
|
||||
// it starts with a character other than '.' and ']'.
|
||||
// It assumes that '[' has already been consumed.
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
} else {
|
||||
lx.emit(itemTableStart)
|
||||
lx.push(lexTableEnd)
|
||||
}
|
||||
return lexTableNameStart
|
||||
}
|
||||
|
||||
func lexTableEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf("expected end of table array name delimiter %q, "+
|
||||
"but got %q instead", arrayTableEnd, r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexTableNameStart(lx *lexer) stateFn {
|
||||
lx.skip(isWhitespace)
|
||||
switch r := lx.peek(); {
|
||||
case r == tableEnd || r == eof:
|
||||
return lx.errorf("unexpected end of table name " +
|
||||
"(table names cannot be empty)")
|
||||
case r == tableSep:
|
||||
return lx.errorf("unexpected table separator " +
|
||||
"(table names cannot be empty)")
|
||||
case r == stringStart || r == rawStringStart:
|
||||
lx.ignore()
|
||||
lx.push(lexTableNameEnd)
|
||||
return lexValue // reuse string lexing
|
||||
default:
|
||||
return lexBareTableName
|
||||
}
|
||||
}
|
||||
|
||||
// lexBareTableName lexes the name of a table. It assumes that at least one
|
||||
// valid character for the table has already been read.
|
||||
func lexBareTableName(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isBareKeyChar(r) {
|
||||
return lexBareTableName
|
||||
}
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexTableNameEnd
|
||||
}
|
||||
|
||||
// lexTableNameEnd reads the end of a piece of a table name, optionally
|
||||
// consuming whitespace.
|
||||
func lexTableNameEnd(lx *lexer) stateFn {
|
||||
lx.skip(isWhitespace)
|
||||
switch r := lx.next(); {
|
||||
case isWhitespace(r):
|
||||
return lexTableNameEnd
|
||||
case r == tableSep:
|
||||
lx.ignore()
|
||||
return lexTableNameStart
|
||||
case r == tableEnd:
|
||||
return lx.pop()
|
||||
default:
|
||||
return lx.errorf("expected '.' or ']' to end table name, "+
|
||||
"but got %q instead", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
|
||||
// lexKeyStart will ignore whitespace.
|
||||
func lexKeyStart(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
switch {
|
||||
case r == keySep:
|
||||
return lx.errorf("unexpected key separator %q", keySep)
|
||||
case isWhitespace(r) || isNL(r):
|
||||
lx.next()
|
||||
return lexSkip(lx, lexKeyStart)
|
||||
case r == stringStart || r == rawStringStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemKeyStart)
|
||||
lx.push(lexKeyEnd)
|
||||
return lexValue // reuse string lexing
|
||||
default:
|
||||
lx.ignore()
|
||||
lx.emit(itemKeyStart)
|
||||
return lexBareKey
|
||||
}
|
||||
}
|
||||
|
||||
// lexBareKey consumes the text of a bare key. Assumes that the first character
|
||||
// (which is not whitespace) has not yet been consumed.
|
||||
func lexBareKey(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case isBareKeyChar(r):
|
||||
return lexBareKey
|
||||
case isWhitespace(r):
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexKeyEnd
|
||||
case r == keySep:
|
||||
lx.backup()
|
||||
lx.emit(itemText)
|
||||
return lexKeyEnd
|
||||
default:
|
||||
return lx.errorf("bare keys cannot contain %q", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
|
||||
// separator).
|
||||
func lexKeyEnd(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case r == keySep:
|
||||
return lexSkip(lx, lexValue)
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexKeyEnd)
|
||||
default:
|
||||
return lx.errorf("expected key separator %q, but got %q instead",
|
||||
keySep, r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexValue starts the consumption of a value anywhere a value is expected.
|
||||
// lexValue will ignore whitespace.
|
||||
// After a value is lexed, the last state on the stack is popped and returned.
|
||||
func lexValue(lx *lexer) stateFn {
|
||||
// We allow whitespace to precede a value, but NOT newlines.
|
||||
// In array syntax, the array states are responsible for ignoring newlines.
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexValue)
|
||||
case isDigit(r):
|
||||
lx.backup() // avoid an extra state and use the same as above
|
||||
return lexNumberOrDateStart
|
||||
}
|
||||
switch r {
|
||||
case arrayStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemArray)
|
||||
return lexArrayValue
|
||||
case inlineTableStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemInlineTableStart)
|
||||
return lexInlineTableValue
|
||||
case stringStart:
|
||||
if lx.accept(stringStart) {
|
||||
if lx.accept(stringStart) {
|
||||
lx.ignore() // Ignore """
|
||||
return lexMultilineString
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
lx.ignore() // ignore the '"'
|
||||
return lexString
|
||||
case rawStringStart:
|
||||
if lx.accept(rawStringStart) {
|
||||
if lx.accept(rawStringStart) {
|
||||
lx.ignore() // Ignore '''
|
||||
return lexMultilineRawString
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
lx.ignore() // ignore the "'"
|
||||
return lexRawString
|
||||
case '+', '-':
|
||||
return lexNumberStart
|
||||
case '.': // special error case, be kind to users
|
||||
return lx.errorf("floats must start with a digit, not '.'")
|
||||
}
|
||||
if unicode.IsLetter(r) {
|
||||
// Be permissive here; lexBool will give a nice error if the
|
||||
// user wrote something like
|
||||
// x = foo
|
||||
// (i.e. not 'true' or 'false' but is something else word-like.)
|
||||
lx.backup()
|
||||
return lexBool
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", r)
|
||||
}
|
||||
|
||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
|
||||
// have already been consumed. All whitespace and newlines are ignored.
|
||||
func lexArrayValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValue)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValue)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
return lx.errorf("unexpected comma")
|
||||
case r == arrayEnd:
|
||||
// NOTE(caleb): The spec isn't clear about whether you can have
|
||||
// a trailing comma or not, so we'll allow it.
|
||||
return lexArrayEnd
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexArrayValueEnd consumes everything between the end of an array value and
|
||||
// the next value (or the end of the array): it ignores whitespace and newlines
|
||||
// and expects either a ',' or a ']'.
|
||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValueEnd)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
lx.ignore()
|
||||
return lexArrayValue // move on to the next value
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
return lx.errorf(
|
||||
"expected a comma or array terminator %q, but got %q instead",
|
||||
arrayEnd, r,
|
||||
)
|
||||
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array.
|
||||
// It assumes that a ']' has just been consumed.
|
||||
func lexArrayEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemArrayEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexInlineTableValue consumes one key/value pair in an inline table.
|
||||
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
|
||||
func lexInlineTableValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValue)
|
||||
case isNL(r):
|
||||
return lx.errorf("newlines not allowed within inline tables")
|
||||
case r == commentStart:
|
||||
lx.push(lexInlineTableValue)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
return lx.errorf("unexpected comma")
|
||||
case r == inlineTableEnd:
|
||||
return lexInlineTableEnd
|
||||
}
|
||||
lx.backup()
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexInlineTableValueEnd consumes everything between the end of an inline table
|
||||
// key/value pair and the next pair (or the end of the table):
|
||||
// it ignores whitespace and expects either a ',' or a '}'.
|
||||
func lexInlineTableValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValueEnd)
|
||||
case isNL(r):
|
||||
return lx.errorf("newlines not allowed within inline tables")
|
||||
case r == commentStart:
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
return lexCommentStart
|
||||
case r == comma:
|
||||
lx.ignore()
|
||||
return lexInlineTableValue
|
||||
case r == inlineTableEnd:
|
||||
return lexInlineTableEnd
|
||||
}
|
||||
return lx.errorf("expected a comma or an inline table terminator %q, "+
|
||||
"but got %q instead", inlineTableEnd, r)
|
||||
}
|
||||
|
||||
// lexInlineTableEnd finishes the lexing of an inline table.
|
||||
// It assumes that a '}' has just been consumed.
|
||||
func lexInlineTableEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemInlineTableEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexString consumes the inner contents of a string. It assumes that the
|
||||
// beginning '"' has already been consumed and ignored.
|
||||
func lexString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case isNL(r):
|
||||
return lx.errorf("strings cannot contain newlines")
|
||||
case r == '\\':
|
||||
lx.push(lexString)
|
||||
return lexStringEscape
|
||||
case r == stringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexMultilineString consumes the inner contents of a string. It assumes that
|
||||
// the beginning '"""' has already been consumed and ignored.
|
||||
func lexMultilineString(lx *lexer) stateFn {
|
||||
switch lx.next() {
|
||||
case eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case '\\':
|
||||
return lexMultilineStringEscape
|
||||
case stringEnd:
|
||||
if lx.accept(stringEnd) {
|
||||
if lx.accept(stringEnd) {
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.emit(itemMultilineString)
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
}
|
||||
return lexMultilineString
|
||||
}
|
||||
|
||||
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
|
||||
// It assumes that the beginning "'" has already been consumed and ignored.
|
||||
func lexRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case isNL(r):
|
||||
return lx.errorf("strings cannot contain newlines")
|
||||
case r == rawStringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemRawString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexRawString
|
||||
}
|
||||
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
||||
// a string. It assumes that the beginning "'''" has already been consumed and
|
||||
// ignored.
|
||||
func lexMultilineRawString(lx *lexer) stateFn {
|
||||
switch lx.next() {
|
||||
case eof:
|
||||
return lx.errorf("unexpected EOF")
|
||||
case rawStringEnd:
|
||||
if lx.accept(rawStringEnd) {
|
||||
if lx.accept(rawStringEnd) {
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.emit(itemRawMultilineString)
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
}
|
||||
return lexMultilineRawString
|
||||
}
|
||||
|
||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
|
||||
// preceding '\\' has already been consumed.
|
||||
func lexMultilineStringEscape(lx *lexer) stateFn {
|
||||
// Handle the special case first:
|
||||
if isNL(lx.next()) {
|
||||
return lexMultilineString
|
||||
}
|
||||
lx.backup()
|
||||
lx.push(lexMultilineString)
|
||||
return lexStringEscape(lx)
|
||||
}
|
||||
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
fallthrough
|
||||
case 'n':
|
||||
fallthrough
|
||||
case 'f':
|
||||
fallthrough
|
||||
case 'r':
|
||||
fallthrough
|
||||
case '"':
|
||||
fallthrough
|
||||
case '\\':
|
||||
return lx.pop()
|
||||
case 'u':
|
||||
return lexShortUnicodeEscape
|
||||
case 'U':
|
||||
return lexLongUnicodeEscape
|
||||
}
|
||||
return lx.errorf("invalid escape character %q; only the following "+
|
||||
"escape characters are allowed: "+
|
||||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
|
||||
}
|
||||
|
||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(`expected four hexadecimal digits after '\u', `+
|
||||
"but got %q instead", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
func lexLongUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 8; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
|
||||
"but got %q instead", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
|
||||
func lexNumberOrDateStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexNumberOrDate
|
||||
}
|
||||
switch r {
|
||||
case '_':
|
||||
return lexNumber
|
||||
case 'e', 'E':
|
||||
return lexFloat
|
||||
case '.':
|
||||
return lx.errorf("floats must start with a digit, not '.'")
|
||||
}
|
||||
return lx.errorf("expected a digit but got %q", r)
|
||||
}
|
||||
|
||||
// lexNumberOrDate consumes either an integer, float or datetime.
|
||||
func lexNumberOrDate(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexNumberOrDate
|
||||
}
|
||||
switch r {
|
||||
case '-':
|
||||
return lexDatetime
|
||||
case '_':
|
||||
return lexNumber
|
||||
case '.', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexDatetime consumes a Datetime, to a first approximation.
|
||||
// The parser validates that it matches one of the accepted formats.
|
||||
func lexDatetime(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexDatetime
|
||||
}
|
||||
switch r {
|
||||
case '-', 'T', ':', '.', 'Z', '+':
|
||||
return lexDatetime
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemDatetime)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that a sign
|
||||
// has already been read, but that *no* digits have been consumed.
|
||||
// lexNumberStart will move to the appropriate integer or float states.
|
||||
func lexNumberStart(lx *lexer) stateFn {
|
||||
// We MUST see a digit. Even floats have to start with a digit.
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("floats must start with a digit, not '.'")
|
||||
}
|
||||
return lx.errorf("expected a digit but got %q", r)
|
||||
}
|
||||
return lexNumber
|
||||
}
|
||||
|
||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
||||
func lexNumber(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexNumber
|
||||
}
|
||||
switch r {
|
||||
case '_':
|
||||
return lexNumber
|
||||
case '.', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFloat consumes the elements of a float. It allows any sequence of
|
||||
// float-like characters, so floats emitted by the lexer are only a first
|
||||
// approximation and must be validated by the parser.
|
||||
func lexFloat(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexFloat
|
||||
}
|
||||
switch r {
|
||||
case '_', '.', '-', '+', 'e', 'E':
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemFloat)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexBool consumes a bool string: 'true' or 'false'.
|
||||
func lexBool(lx *lexer) stateFn {
|
||||
var rs []rune
|
||||
for {
|
||||
r := lx.next()
|
||||
if !unicode.IsLetter(r) {
|
||||
lx.backup()
|
||||
break
|
||||
}
|
||||
rs = append(rs, r)
|
||||
}
|
||||
s := string(rs)
|
||||
switch s {
|
||||
case "true", "false":
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
return lx.errorf("expected value but found %q instead", s)
|
||||
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
|
||||
// itemCommentStart and consume no characters, passing control to lexComment.
|
||||
func lexCommentStart(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemCommentStart)
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
||||
// It will consume *up to* the first newline character, and pass control
|
||||
// back to the last state on the stack.
|
||||
func lexComment(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isNL(r) || r == eof {
|
||||
lx.emit(itemText)
|
||||
return lx.pop()
|
||||
}
|
||||
lx.next()
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexSkip ignores all slurped input and moves on to the next state.
|
||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
||||
return func(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
return nextState
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitespace returns true if `r` is a whitespace character according
|
||||
// to the spec.
|
||||
func isWhitespace(r rune) bool {
|
||||
return r == '\t' || r == ' '
|
||||
}
|
||||
|
||||
func isNL(r rune) bool {
|
||||
return r == '\n' || r == '\r'
|
||||
}
|
||||
|
||||
func isDigit(r rune) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') ||
|
||||
(r >= 'a' && r <= 'f') ||
|
||||
(r >= 'A' && r <= 'F')
|
||||
}
|
||||
|
||||
func isBareKeyChar(r rune) bool {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' ||
|
||||
r == '-'
|
||||
}
|
||||
|
||||
func (itype itemType) String() string {
|
||||
switch itype {
|
||||
case itemError:
|
||||
return "Error"
|
||||
case itemNIL:
|
||||
return "NIL"
|
||||
case itemEOF:
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
case itemInteger:
|
||||
return "Integer"
|
||||
case itemFloat:
|
||||
return "Float"
|
||||
case itemDatetime:
|
||||
return "DateTime"
|
||||
case itemTableStart:
|
||||
return "TableStart"
|
||||
case itemTableEnd:
|
||||
return "TableEnd"
|
||||
case itemKeyStart:
|
||||
return "KeyStart"
|
||||
case itemArray:
|
||||
return "Array"
|
||||
case itemArrayEnd:
|
||||
return "ArrayEnd"
|
||||
case itemCommentStart:
|
||||
return "CommentStart"
|
||||
}
|
||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
||||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
}
|
|
@ -0,0 +1,592 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type parser struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
lx *lexer
|
||||
|
||||
// A list of keys in the order that they appear in the TOML data.
|
||||
ordered []Key
|
||||
|
||||
// the full key for the current hash in scope
|
||||
context Key
|
||||
|
||||
// the base key name for everything except hashes
|
||||
currentKey string
|
||||
|
||||
// rough approximation of line number
|
||||
approxLine int
|
||||
|
||||
// A map of 'key.group.names' to whether they were created implicitly.
|
||||
implicits map[string]bool
|
||||
}
|
||||
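The ordered keys and per-key types collected here surface through the package's MetaData. A decoding sketch, assuming the upstream BurntSushi/toml API (toml.Decode, MetaData.Keys, MetaData.Type); the TOML blob is invented:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := `
title = "n9e"

[http]
listen = ":8000"
`
	var v map[string]interface{}
	md, err := toml.Decode(blob, &v)
	if err != nil {
		panic(err)
	}
	// md.Keys() reflects the parser's `ordered` slice; md.Type mirrors `types`.
	for _, k := range md.Keys() {
		fmt.Println(k, "=>", md.Type(k...))
	}
	// title => String
	// http => Hash
	// http.listen => String
}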
|
||||
type parseError string
|
||||
|
||||
func (pe parseError) Error() string {
|
||||
return string(pe)
|
||||
}
|
||||
|
||||
func parse(data string) (p *parser, err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var ok bool
|
||||
if err, ok = r.(parseError); ok {
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
|
||||
p = &parser{
|
||||
mapping: make(map[string]interface{}),
|
||||
types: make(map[string]tomlType),
|
||||
lx: lex(data),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]bool),
|
||||
}
|
||||
for {
|
||||
item := p.next()
|
||||
if item.typ == itemEOF {
|
||||
break
|
||||
}
|
||||
p.topLevel(item)
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func (p *parser) panicf(format string, v ...interface{}) {
|
||||
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
|
||||
p.approxLine, p.current(), fmt.Sprintf(format, v...))
|
||||
panic(parseError(msg))
|
||||
}
|
||||
|
||||
func (p *parser) next() item {
|
||||
it := p.lx.nextItem()
|
||||
if it.typ == itemError {
|
||||
p.panicf("%s", it.val)
|
||||
}
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) bug(format string, v ...interface{}) {
|
||||
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
|
||||
}
|
||||
|
||||
func (p *parser) expect(typ itemType) item {
|
||||
it := p.next()
|
||||
p.assertEqual(typ, it.typ)
|
||||
return it
|
||||
}
|
||||
|
||||
func (p *parser) assertEqual(expected, got itemType) {
|
||||
if expected != got {
|
||||
p.bug("Expected '%s' but got '%s'.", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parser) topLevel(item item) {
|
||||
switch item.typ {
|
||||
case itemCommentStart:
|
||||
p.approxLine = item.line
|
||||
p.expect(itemText)
|
||||
case itemTableStart:
|
||||
kg := p.next()
|
||||
p.approxLine = kg.line
|
||||
|
||||
var key Key
|
||||
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
|
||||
key = append(key, p.keyString(kg))
|
||||
}
|
||||
p.assertEqual(itemTableEnd, kg.typ)
|
||||
|
||||
p.establishContext(key, false)
|
||||
p.setType("", tomlHash)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemArrayTableStart:
|
||||
kg := p.next()
|
||||
p.approxLine = kg.line
|
||||
|
||||
var key Key
|
||||
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
|
||||
key = append(key, p.keyString(kg))
|
||||
}
|
||||
p.assertEqual(itemArrayTableEnd, kg.typ)
|
||||
|
||||
p.establishContext(key, true)
|
||||
p.setType("", tomlArrayHash)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemKeyStart:
|
||||
kname := p.next()
|
||||
p.approxLine = kname.line
|
||||
p.currentKey = p.keyString(kname)
|
||||
|
||||
val, typ := p.value(p.next())
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
p.currentKey = ""
|
||||
default:
|
||||
p.bug("Unexpected type at top level: %s", item.typ)
|
||||
}
|
||||
}
|
||||
|
||||
// Gets a string for a key (or part of a key in a table name).
|
||||
func (p *parser) keyString(it item) string {
|
||||
switch it.typ {
|
||||
case itemText:
|
||||
return it.val
|
||||
case itemString, itemMultilineString,
|
||||
itemRawString, itemRawMultilineString:
|
||||
s, _ := p.value(it)
|
||||
return s.(string)
|
||||
default:
|
||||
p.bug("Unexpected key type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
|
||||
// as an empty interface.
|
||||
func (p *parser) value(it item) (interface{}, tomlType) {
|
||||
switch it.typ {
|
||||
case itemString:
|
||||
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
|
||||
case itemMultilineString:
|
||||
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
|
||||
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
|
||||
case itemRawString:
|
||||
return it.val, p.typeOfPrimitive(it)
|
||||
case itemRawMultilineString:
|
||||
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
|
||||
case itemBool:
|
||||
switch it.val {
|
||||
case "true":
|
||||
return true, p.typeOfPrimitive(it)
|
||||
case "false":
|
||||
return false, p.typeOfPrimitive(it)
|
||||
}
|
||||
p.bug("Expected boolean value, but got '%s'.", it.val)
|
||||
case itemInteger:
|
||||
if !numUnderscoresOK(it.val) {
|
||||
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
|
||||
it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
num, err := strconv.ParseInt(val, 10, 64)
|
||||
if err != nil {
|
||||
// Distinguish integer values. Normally, it'd be a bug if the lexer
|
||||
// provides an invalid integer, but it's possible that the number is
|
||||
// out of range of valid values (which the lexer cannot determine).
|
||||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Integer '%s' is out of the range of 64-bit "+
|
||||
"signed integers.", it.val)
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemFloat:
|
||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
||||
switch r {
|
||||
case '.', 'e', 'E':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
for _, part := range parts {
|
||||
if !numUnderscoresOK(part) {
|
||||
p.panicf("Invalid float %q: underscores must be "+
|
||||
"surrounded by digits", it.val)
|
||||
}
|
||||
}
|
||||
if !numPeriodsOK(it.val) {
|
||||
// As a special case, numbers like '123.' or '1.e2',
|
||||
// which are valid as far as Go/strconv are concerned,
|
||||
// must be rejected because TOML says that a fractional
|
||||
// part consists of '.' followed by 1+ digits.
|
||||
p.panicf("Invalid float %q: '.' must be followed "+
|
||||
"by one or more digits", it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
num, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok &&
|
||||
e.Err == strconv.ErrRange {
|
||||
|
||||
p.panicf("Float '%s' is out of the range of 64-bit "+
|
||||
"IEEE-754 floating-point numbers.", it.val)
|
||||
} else {
|
||||
p.panicf("Invalid float value: %q", it.val)
|
||||
}
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
case itemDatetime:
|
||||
var t time.Time
|
||||
var ok bool
|
||||
var err error
|
||||
for _, format := range []string{
|
||||
"2006-01-02T15:04:05Z07:00",
|
||||
"2006-01-02T15:04:05",
|
||||
"2006-01-02",
|
||||
} {
|
||||
t, err = time.ParseInLocation(format, it.val, time.Local)
|
||||
if err == nil {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
p.panicf("Invalid TOML Datetime: %q.", it.val)
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
case itemArray:
|
||||
array := make([]interface{}, 0)
|
||||
types := make([]tomlType, 0)
|
||||
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
val, typ := p.value(it)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
}
|
||||
return array, p.typeOfArray(types)
|
||||
case itemInlineTableStart:
|
||||
var (
|
||||
hash = make(map[string]interface{})
|
||||
outerContext = p.context
|
||||
outerKey = p.currentKey
|
||||
)
|
||||
|
||||
p.context = append(p.context, p.currentKey)
|
||||
p.currentKey = ""
|
||||
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
|
||||
if it.typ != itemKeyStart {
|
||||
p.bug("Expected key start but instead found %q, around line %d",
|
||||
it.val, p.approxLine)
|
||||
}
|
||||
if it.typ == itemCommentStart {
|
||||
p.expect(itemText)
|
||||
continue
|
||||
}
|
||||
|
||||
// retrieve key
|
||||
k := p.next()
|
||||
p.approxLine = k.line
|
||||
kname := p.keyString(k)
|
||||
|
||||
// retrieve value
|
||||
p.currentKey = kname
|
||||
val, typ := p.value(p.next())
|
||||
// make sure we keep metadata up to date
|
||||
p.setType(kname, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
hash[kname] = val
|
||||
}
|
||||
p.context = outerContext
|
||||
p.currentKey = outerKey
|
||||
return hash, tomlHash
|
||||
}
|
||||
p.bug("Unexpected value type: %s", it.typ)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// numUnderscoresOK checks whether each underscore in s is surrounded by
|
||||
// characters that are not underscores.
|
||||
func numUnderscoresOK(s string) bool {
|
||||
accept := false
|
||||
for _, r := range s {
|
||||
if r == '_' {
|
||||
if !accept {
|
||||
return false
|
||||
}
|
||||
accept = false
|
||||
continue
|
||||
}
|
||||
accept = true
|
||||
}
|
||||
return accept
|
||||
}
|
||||
|
||||
// numPeriodsOK checks whether every period in s is followed by a digit.
|
||||
func numPeriodsOK(s string) bool {
|
||||
period := false
|
||||
for _, r := range s {
|
||||
if period && !isDigit(r) {
|
||||
return false
|
||||
}
|
||||
period = r == '.'
|
||||
}
|
||||
return !period
|
||||
}
|
||||
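A sketch of what these two checks accept and reject at the public API level (underscores only between digits, and a '.' must be followed by a digit), assuming the upstream BurntSushi/toml import path:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	cases := []string{
		"a = 1_000",  // ok: underscores surrounded by digits
		"a = 1__000", // rejected by numUnderscoresOK
		"a = 3.14",   // ok
		"a = 3.",     // rejected by numPeriodsOK
	}
	for _, c := range cases {
		var v map[string]interface{}
		_, err := toml.Decode(c, &v)
		fmt.Printf("%q -> err=%v\n", c, err)
	}
}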
|
||||
// establishContext sets the current context of the parser,
|
||||
// where the context is either a hash or an array of hashes. Which one is
|
||||
// set depends on the value of the `array` parameter.
|
||||
//
|
||||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
||||
// will create implicit hashes automatically.
|
||||
func (p *parser) establishContext(key Key, array bool) {
|
||||
var ok bool
|
||||
|
||||
// Always start at the top level and drill down for our context.
|
||||
hashContext := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
|
||||
// We only need implicit hashes for key[0:-1]
|
||||
for _, k := range key[0 : len(key)-1] {
|
||||
_, ok = hashContext[k]
|
||||
keyContext = append(keyContext, k)
|
||||
|
||||
// No key? Make an implicit hash and move on.
|
||||
if !ok {
|
||||
p.addImplicit(keyContext)
|
||||
hashContext[k] = make(map[string]interface{})
|
||||
}
|
||||
|
||||
// If the hash context is actually an array of tables, then set
|
||||
// the hash context to the last element in that array.
|
||||
//
|
||||
// Otherwise, it better be a table, since this MUST be a key group (by
|
||||
// virtue of it not being the last element in a key).
|
||||
switch t := hashContext[k].(type) {
|
||||
case []map[string]interface{}:
|
||||
hashContext = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
hashContext = t
|
||||
default:
|
||||
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
||||
}
|
||||
}
|
||||
|
||||
p.context = keyContext
|
||||
if array {
|
||||
// If this is the first element for this array, then allocate a new
|
||||
// list of tables for it.
|
||||
k := key[len(key)-1]
|
||||
if _, ok := hashContext[k]; !ok {
|
||||
hashContext[k] = make([]map[string]interface{}, 0, 5)
|
||||
}
|
||||
|
||||
// Add a new table. But make sure the key hasn't already been used
|
||||
// for something else.
|
||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
||||
} else {
|
||||
p.panicf("Key '%s' was already created and cannot be used as "+
|
||||
"an array.", keyContext)
|
||||
}
|
||||
} else {
|
||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
||||
}
|
||||
p.context = append(p.context, key[len(key)-1])
|
||||
}
|
||||
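To illustrate the implicit hashes and array-of-tables handling above, a decoding sketch (assuming the upstream toml.Decode API); the resulting Go structure mirrors what establishContext builds in p.mapping:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := `
[a.b.c]          # "a" and "a.b" are created implicitly
x = 1

[[server]]       # each [[server]] appends a new table to the array
name = "s1"

[[server]]
name = "s2"
`
	var v map[string]interface{}
	if _, err := toml.Decode(blob, &v); err != nil {
		panic(err)
	}
	fmt.Println(v["a"])      // map[b:map[c:map[x:1]]]
	fmt.Println(v["server"]) // [map[name:s1] map[name:s2]]
}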
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
// It will make sure that the key hasn't already been defined, accounting for
|
||||
// implicit key groups.
|
||||
func (p *parser) setValue(key string, value interface{}) {
|
||||
var tmpHash interface{}
|
||||
var ok bool
|
||||
|
||||
hash := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
if tmpHash, ok = hash[k]; !ok {
|
||||
p.bug("Context for key '%s' has not been established.", keyContext)
|
||||
}
|
||||
switch t := tmpHash.(type) {
|
||||
case []map[string]interface{}:
|
||||
// The context is an array of tables. Pick the most recent table
|
||||
// defined as the current hash.
|
||||
hash = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
hash = t
|
||||
default:
|
||||
p.bug("Expected hash to have type 'map[string]interface{}', but "+
|
||||
"it has '%T' instead.", tmpHash)
|
||||
}
|
||||
}
|
||||
keyContext = append(keyContext, key)
|
||||
|
||||
if _, ok := hash[key]; ok {
|
||||
// Typically, if the given key has already been set, then we have
|
||||
// to raise an error since duplicate keys are disallowed. However,
|
||||
// it's possible that a key was previously defined implicitly. In this
|
||||
// case, it is allowed to be redefined concretely. (See the
|
||||
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
|
||||
//
|
||||
// But we have to make sure to stop marking it as an implicit. (So that
|
||||
// another redefinition provokes an error.)
|
||||
//
|
||||
// Note that since it has already been defined (as a hash), we don't
|
||||
// want to overwrite it. So our business is done.
|
||||
if p.isImplicit(keyContext) {
|
||||
p.removeImplicit(keyContext)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we have a concrete key trying to override a previous
|
||||
// key, which is *always* wrong.
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
}
|
||||
hash[key] = value
|
||||
}
|
||||
|
||||
// setType sets the type of a particular value at a given key.
|
||||
// It should be called immediately AFTER setValue.
|
||||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
}
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
keyContext = append(keyContext, key)
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
}
|
||||
|
||||
// addImplicit sets the given Key as having been created implicitly.
|
||||
func (p *parser) addImplicit(key Key) {
|
||||
p.implicits[key.String()] = true
|
||||
}
|
||||
|
||||
// removeImplicit stops tagging the given key as having been implicitly
|
||||
// created.
|
||||
func (p *parser) removeImplicit(key Key) {
|
||||
p.implicits[key.String()] = false
|
||||
}
|
||||
|
||||
// isImplicit returns true if the key group pointed to by the key was created
|
||||
// implicitly.
|
||||
func (p *parser) isImplicit(key Key) bool {
|
||||
return p.implicits[key.String()]
|
||||
}
|
||||
|
||||
// current returns the full key name of the current context.
|
||||
func (p *parser) current() string {
|
||||
if len(p.currentKey) == 0 {
|
||||
return p.context.String()
|
||||
}
|
||||
if len(p.context) == 0 {
|
||||
return p.currentKey
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
|
||||
}
|
||||
|
||||
func stripFirstNewline(s string) string {
|
||||
if len(s) == 0 || s[0] != '\n' {
|
||||
return s
|
||||
}
|
||||
return s[1:]
|
||||
}
|
||||
|
||||
func stripEscapedWhitespace(s string) string {
|
||||
esc := strings.Split(s, "\\\n")
|
||||
if len(esc) > 1 {
|
||||
for i := 1; i < len(esc); i++ {
|
||||
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
|
||||
}
|
||||
}
|
||||
return strings.Join(esc, "")
|
||||
}
|
||||
|
||||
func (p *parser) replaceEscapes(str string) string {
|
||||
var replaced []rune
|
||||
s := []byte(str)
|
||||
r := 0
|
||||
for r < len(s) {
|
||||
if s[r] != '\\' {
|
||||
c, size := utf8.DecodeRune(s[r:])
|
||||
r += size
|
||||
replaced = append(replaced, c)
|
||||
continue
|
||||
}
|
||||
r += 1
|
||||
if r >= len(s) {
|
||||
p.bug("Escape sequence at end of string.")
|
||||
return ""
|
||||
}
|
||||
switch s[r] {
|
||||
default:
|
||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
||||
return ""
|
||||
case 'b':
|
||||
replaced = append(replaced, rune(0x0008))
|
||||
r += 1
|
||||
case 't':
|
||||
replaced = append(replaced, rune(0x0009))
|
||||
r += 1
|
||||
case 'n':
|
||||
replaced = append(replaced, rune(0x000A))
|
||||
r += 1
|
||||
case 'f':
|
||||
replaced = append(replaced, rune(0x000C))
|
||||
r += 1
|
||||
case 'r':
|
||||
replaced = append(replaced, rune(0x000D))
|
||||
r += 1
|
||||
case '"':
|
||||
replaced = append(replaced, rune(0x0022))
|
||||
r += 1
|
||||
case '\\':
|
||||
replaced = append(replaced, rune(0x005C))
|
||||
r += 1
|
||||
case 'u':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 5
|
||||
case 'U':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 9
|
||||
}
|
||||
}
|
||||
return string(replaced)
|
||||
}
|
||||
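The escape handling above surfaces at the API as ordinary Go strings; a small decoding sketch (upstream import path assumed, sample string invented):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		S string `toml:"s"`
	}
	// \t and \u00E9 are resolved by replaceEscapes / asciiEscapeToUnicode.
	if _, err := toml.Decode(`s = "tab:\t caf\u00E9"`, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // "tab:\t café"
}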
|
||||
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
|
||||
s := string(bs)
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
||||
"lexer claims it's OK: %s", s, err)
|
||||
}
|
||||
if !utf8.ValidRune(rune(hex)) {
|
||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
||||
}
|
||||
return rune(hex)
|
||||
}
|
||||
|
||||
func isStringType(ty itemType) bool {
|
||||
return ty == itemString || ty == itemMultilineString ||
|
||||
ty == itemRawString || ty == itemRawMultilineString
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
au BufWritePost *.go silent!make tags > /dev/null 2>&1
|
|
@ -0,0 +1,91 @@
|
|||
package toml
|
||||
|
||||
// tomlType represents any Go type that corresponds to a TOML type.
|
||||
// While the first draft of the TOML spec has a simplistic type system that
|
||||
// probably doesn't need this level of sophistication, we seem to be militating
|
||||
// toward adding real composite types.
|
||||
type tomlType interface {
|
||||
typeString() string
|
||||
}
|
||||
|
||||
// typeEqual accepts any two types and returns true if they are equal.
|
||||
func typeEqual(t1, t2 tomlType) bool {
|
||||
if t1 == nil || t2 == nil {
|
||||
return false
|
||||
}
|
||||
return t1.typeString() == t2.typeString()
|
||||
}
|
||||
|
||||
func typeIsHash(t tomlType) bool {
|
||||
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
|
||||
}
|
||||
|
||||
type tomlBaseType string
|
||||
|
||||
func (btype tomlBaseType) typeString() string {
|
||||
return string(btype)
|
||||
}
|
||||
|
||||
func (btype tomlBaseType) String() string {
|
||||
return btype.typeString()
|
||||
}
|
||||
|
||||
var (
|
||||
tomlInteger tomlBaseType = "Integer"
|
||||
tomlFloat tomlBaseType = "Float"
|
||||
tomlDatetime tomlBaseType = "Datetime"
|
||||
tomlString tomlBaseType = "String"
|
||||
tomlBool tomlBaseType = "Bool"
|
||||
tomlArray tomlBaseType = "Array"
|
||||
tomlHash tomlBaseType = "Hash"
|
||||
tomlArrayHash tomlBaseType = "ArrayHash"
|
||||
)
|
||||
|
||||
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
|
||||
// Primitive values are: Integer, Float, Datetime, String and Bool.
|
||||
//
|
||||
// Passing a lexer item other than the following will cause a BUG message
|
||||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
|
||||
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
||||
switch lexItem.typ {
|
||||
case itemInteger:
|
||||
return tomlInteger
|
||||
case itemFloat:
|
||||
return tomlFloat
|
||||
case itemDatetime:
|
||||
return tomlDatetime
|
||||
case itemString:
|
||||
return tomlString
|
||||
case itemMultilineString:
|
||||
return tomlString
|
||||
case itemRawString:
|
||||
return tomlString
|
||||
case itemRawMultilineString:
|
||||
return tomlString
|
||||
case itemBool:
|
||||
return tomlBool
|
||||
}
|
||||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// typeOfArray returns a tomlType for an array given a list of types of its
|
||||
// values.
|
||||
//
|
||||
// In the current spec, if an array is homogeneous, then its type is always
|
||||
// "Array". If the array is not homogeneous, an error is generated.
|
||||
func (p *parser) typeOfArray(types []tomlType) tomlType {
|
||||
// Empty arrays are cool.
|
||||
if len(types) == 0 {
|
||||
return tomlArray
|
||||
}
|
||||
|
||||
theType := types[0]
|
||||
for _, t := range types[1:] {
|
||||
if !typeEqual(theType, t) {
|
||||
p.panicf("Array contains values of type '%s' and '%s', but "+
|
||||
"arrays must be homogeneous.", theType, t)
|
||||
}
|
||||
}
|
||||
return tomlArray
|
||||
}
|
|
@ -0,0 +1,242 @@
|
|||
package toml
|
||||
|
||||
// Struct field handling is adapted from code in encoding/json:
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the Go distribution.
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A field represents a single field found in a struct.
|
||||
type field struct {
|
||||
name string // the name of the field (`toml` tag included)
|
||||
tag bool // whether field has a `toml` tag
|
||||
index []int // represents the depth of an anonymous field
|
||||
typ reflect.Type // the type of the field
|
||||
}
|
||||
|
||||
// byName sorts field by name, breaking ties with depth,
|
||||
// then breaking ties with "name came from toml tag", then
|
||||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
}
|
||||
if len(x[i].index) != len(x[j].index) {
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
if x[i].tag != x[j].tag {
|
||||
return x[i].tag
|
||||
}
|
||||
return byIndex(x).Less(i, j)
|
||||
}
|
||||
|
||||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
return false
|
||||
}
|
||||
if xik != x[j].index[k] {
|
||||
return xik < x[j].index[k]
|
||||
}
|
||||
}
|
||||
return len(x[i].index) < len(x[j].index)
|
||||
}
|
||||
|
||||
// typeFields returns a list of fields that TOML should recognize for the given
|
||||
// type. The algorithm is breadth-first search over the set of structs to
|
||||
// include - the top struct and then any reachable anonymous structs.
|
||||
func typeFields(t reflect.Type) []field {
|
||||
// Anonymous fields to explore at the current level and the next.
|
||||
current := []field{}
|
||||
next := []field{{typ: t}}
|
||||
|
||||
// Count of queued names for current level and the next.
|
||||
count := map[reflect.Type]int{}
|
||||
nextCount := map[reflect.Type]int{}
|
||||
|
||||
// Types already visited at an earlier level.
|
||||
visited := map[reflect.Type]bool{}
|
||||
|
||||
// Fields found.
|
||||
var fields []field
|
||||
|
||||
for len(next) > 0 {
|
||||
current, next = next, current[:0]
|
||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
||||
|
||||
for _, f := range current {
|
||||
if visited[f.typ] {
|
||||
continue
|
||||
}
|
||||
visited[f.typ] = true
|
||||
|
||||
// Scan f.typ for fields to include.
|
||||
for i := 0; i < f.typ.NumField(); i++ {
|
||||
sf := f.typ.Field(i)
|
||||
if sf.PkgPath != "" && !sf.Anonymous { // unexported
|
||||
continue
|
||||
}
|
||||
opts := getOptions(sf.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
index := make([]int, len(f.index)+1)
|
||||
copy(index, f.index)
|
||||
index[len(f.index)] = i
|
||||
|
||||
ft := sf.Type
|
||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
||||
// Follow pointer.
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
// Record found field and index sequence.
|
||||
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
||||
tagged := opts.name != ""
|
||||
name := opts.name
|
||||
if name == "" {
|
||||
name = sf.Name
|
||||
}
|
||||
fields = append(fields, field{name, tagged, index, ft})
|
||||
if count[f.typ] > 1 {
|
||||
// If there were multiple instances, add a second,
|
||||
// so that the annihilation code will see a duplicate.
|
||||
// It only cares about the distinction between 1 or 2,
|
||||
// so don't bother generating any more copies.
|
||||
fields = append(fields, fields[len(fields)-1])
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Record new anonymous struct to explore in next round.
|
||||
nextCount[ft]++
|
||||
if nextCount[ft] == 1 {
|
||||
f := field{name: ft.Name(), index: index, typ: ft}
|
||||
next = append(next, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(byName(fields))
|
||||
|
||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
||||
// except that fields with TOML tags are promoted.
|
||||
|
||||
// The fields are sorted in primary order of name, secondary order
|
||||
// of field index length. Loop over names; for each name, delete
|
||||
// hidden fields by choosing the one dominant field that survives.
|
||||
out := fields[:0]
|
||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
||||
// One iteration per name.
|
||||
// Find the sequence of fields with the name of this first field.
|
||||
fi := fields[i]
|
||||
name := fi.name
|
||||
for advance = 1; i+advance < len(fields); advance++ {
|
||||
fj := fields[i+advance]
|
||||
if fj.name != name {
|
||||
break
|
||||
}
|
||||
}
|
||||
if advance == 1 { // Only one field with this name
|
||||
out = append(out, fi)
|
||||
continue
|
||||
}
|
||||
dominant, ok := dominantField(fields[i : i+advance])
|
||||
if ok {
|
||||
out = append(out, dominant)
|
||||
}
|
||||
}
|
||||
|
||||
fields = out
|
||||
sort.Sort(byIndex(fields))
|
||||
|
||||
return fields
|
||||
}
|
||||
|
||||
// dominantField looks through the fields, all of which are known to
|
||||
// have the same name, to find the single field that dominates the
|
||||
// others using Go's embedding rules, modified by the presence of
|
||||
// TOML tags. If there are multiple top-level fields, the boolean
|
||||
// will be false: This condition is an error in Go and we skip all
|
||||
// the fields.
|
||||
func dominantField(fields []field) (field, bool) {
|
||||
// The fields are sorted in increasing index-length order. The winner
|
||||
// must therefore be one with the shortest index length. Drop all
|
||||
// longer entries, which is easy: just truncate the slice.
|
||||
length := len(fields[0].index)
|
||||
tagged := -1 // Index of first tagged field.
|
||||
for i, f := range fields {
|
||||
if len(f.index) > length {
|
||||
fields = fields[:i]
|
||||
break
|
||||
}
|
||||
if f.tag {
|
||||
if tagged >= 0 {
|
||||
// Multiple tagged fields at the same level: conflict.
|
||||
// Return no field.
|
||||
return field{}, false
|
||||
}
|
||||
tagged = i
|
||||
}
|
||||
}
|
||||
if tagged >= 0 {
|
||||
return fields[tagged], true
|
||||
}
|
||||
// All remaining fields have the same length. If there's more than one,
|
||||
// we have a conflict (two fields named "X" at the same level) and we
|
||||
// return no field.
|
||||
if len(fields) > 1 {
|
||||
return field{}, false
|
||||
}
|
||||
return fields[0], true
|
||||
}
|
||||
|
||||
var fieldCache struct {
|
||||
sync.RWMutex
|
||||
m map[reflect.Type][]field
|
||||
}
|
||||
|
||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
||||
func cachedTypeFields(t reflect.Type) []field {
|
||||
fieldCache.RLock()
|
||||
f := fieldCache.m[t]
|
||||
fieldCache.RUnlock()
|
||||
if f != nil {
|
||||
return f
|
||||
}
|
||||
|
||||
// Compute fields without lock.
|
||||
// Might duplicate effort but won't hold other computations back.
|
||||
f = typeFields(t)
|
||||
if f == nil {
|
||||
f = []field{}
|
||||
}
|
||||
|
||||
fieldCache.Lock()
|
||||
if fieldCache.m == nil {
|
||||
fieldCache.m = map[reflect.Type][]field{}
|
||||
}
|
||||
fieldCache.m[t] = f
|
||||
fieldCache.Unlock()
|
||||
return f
|
||||
}
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,51 @@
|
|||
# pcg
|
||||
Go implementation of Melissa O'Neill's PCG pseudorandom number generator, which is
|
||||
well-studied, excellent, and fast both to create and to execute.
|
||||
|
||||
````
|
||||
Performance on a MacBook Pro:
|
||||
|
||||
$ go test -v -bench=.
|
||||
=== RUN TestSanity32
|
||||
--- PASS: TestSanity32 (0.00s)
|
||||
=== RUN TestSum32
|
||||
--- PASS: TestSum32 (0.00s)
|
||||
=== RUN TestAdvance32
|
||||
--- PASS: TestAdvance32 (0.00s)
|
||||
=== RUN TestRetreat32
|
||||
--- PASS: TestRetreat32 (0.00s)
|
||||
=== RUN TestSanity64
|
||||
--- PASS: TestSanity64 (0.00s)
|
||||
=== RUN TestSum64
|
||||
--- PASS: TestSum64 (0.00s)
|
||||
=== RUN TestAdvance64
|
||||
--- PASS: TestAdvance64 (0.00s)
|
||||
=== RUN TestRetreat64
|
||||
--- PASS: TestRetreat64 (0.00s)
|
||||
=== RUN ExampleReport32
|
||||
--- PASS: ExampleReport32 (0.00s)
|
||||
=== RUN ExampleReport64
|
||||
--- PASS: ExampleReport64 (0.00s)
|
||||
BenchmarkNew32-8 2000000000 1.09 ns/op
|
||||
BenchmarkRandom32-8 1000000000 2.49 ns/op
|
||||
BenchmarkBounded32-8 200000000 9.75 ns/op
|
||||
BenchmarkNew64-8 200000000 6.89 ns/op
|
||||
BenchmarkRandom64-8 200000000 7.58 ns/op
|
||||
BenchmarkBounded64-8 50000000 25.5 ns/op
|
||||
````
|
||||
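For orientation, here is a minimal usage sketch (not part of the upstream README) based on the PCG32/PCG64 API defined in the sources that follow; the import path is an assumption about where the vendored package lives.

````
package main

import (
	"fmt"

	"github.com/MichaelTJones/pcg" // assumed import path of the vendored package
)

func main() {
	// Seed with an explicit state/sequence pair so the stream is reproducible.
	rng := pcg.NewPCG32().Seed(42, 54)

	fmt.Println(rng.Random())     // next 32-bit pseudorandom value
	fmt.Println(rng.Bounded(100)) // unbiased value in [0, 100)

	// Advance/Retreat jump the stream forward or backward without
	// generating the intermediate values.
	rng.Advance(1000000)
	rng.Retreat(1000000)
}
````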
|
||||
Provided under terms of the Apache license in keeping with Melissa O'Neill's original from which this was ported.
|
||||
|
||||
Copyright 2018 Michael T. Jones
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,86 @@
|
|||
package pcg
|
||||
|
||||
// PCG Random Number Generation
|
||||
// Developed by Melissa O'Neill <oneill@pcg-random.org>
|
||||
// Paper and details at http://www.pcg-random.org
|
||||
// Ported to Go by Michael Jones <michael.jones@gmail.com>
|
||||
|
||||
// Copyright 2018 Michael T. Jones
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
|
||||
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
|
||||
// the specific language governing permissions and limitations under the License.
|
||||
|
||||
const (
|
||||
pcg32State = 0x853c49e6748fea9b // 9600629759793949339
|
||||
pcg32Increment = 0xda3e39cb94b95bdb // 15726070495360670683
|
||||
pcg32Multiplier = 0x5851f42d4c957f2d // 6364136223846793005
|
||||
)
|
||||
|
||||
type PCG32 struct {
|
||||
state uint64
|
||||
increment uint64
|
||||
}
|
||||
|
||||
func NewPCG32() *PCG32 {
|
||||
return &PCG32{pcg32State, pcg32Increment}
|
||||
}
|
||||
|
||||
func (p *PCG32) Seed(state, sequence uint64) *PCG32 {
|
||||
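// The increment must be odd for the underlying LCG to have full period,
// hence the (sequence << 1) | 1 below.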
p.increment = (sequence << 1) | 1
|
||||
p.state = (state+p.increment)*pcg32Multiplier + p.increment
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PCG32) Random() uint32 {
|
||||
// Advance 64-bit linear congruential generator to new state
|
||||
oldState := p.state
|
||||
p.state = oldState*pcg32Multiplier + p.increment
|
||||
|
||||
// Confuse and permute 32-bit output from old state
|
||||
xorShifted := uint32(((oldState >> 18) ^ oldState) >> 27)
|
||||
rot := uint32(oldState >> 59)
|
||||
return (xorShifted >> rot) | (xorShifted << ((-rot) & 31))
|
||||
}
|
||||
|
||||
func (p *PCG32) Bounded(bound uint32) uint32 {
|
||||
if bound == 0 {
|
||||
return 0
|
||||
}
|
||||
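// -bound % bound equals 2^32 mod bound in uint32 arithmetic; values below
// this threshold are rejected so that r % bound is uniform (no modulo bias).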
threshold := -bound % bound
|
||||
for {
|
||||
r := p.Random()
|
||||
if r >= threshold {
|
||||
return r % bound
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PCG32) Advance(delta uint64) *PCG32 {
|
||||
p.state = p.advanceLCG64(p.state, delta, pcg32Multiplier, p.increment)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PCG32) Retreat(delta uint64) *PCG32 {
|
||||
return p.Advance(-delta)
|
||||
}
|
||||
|
||||
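// advanceLCG64 jumps the LCG forward by delta steps in O(log delta) by
// exponentiation-by-squaring of the affine update x -> curMult*x + curPlus.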
func (p *PCG32) advanceLCG64(state, delta, curMult, curPlus uint64) uint64 {
|
||||
accMult := uint64(1)
|
||||
accPlus := uint64(0)
|
||||
for delta > 0 {
|
||||
if delta&1 != 0 {
|
||||
accMult *= curMult
|
||||
accPlus = accPlus*curMult + curPlus
|
||||
}
|
||||
curPlus = (curMult + 1) * curPlus
|
||||
curMult *= curMult
|
||||
delta /= 2
|
||||
}
|
||||
return accMult*state + accPlus
|
||||
}
|
|
@ -0,0 +1,63 @@
|
|||
package pcg
|
||||
|
||||
// PCG Random Number Generation
|
||||
// Developed by Melissa O'Neill <oneill@pcg-random.org>
|
||||
// Paper and details at http://www.pcg-random.org
|
||||
// Ported to Go by Michael Jones <michael.jones@gmail.com>
|
||||
|
||||
// Copyright 2018 Michael T. Jones
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
|
||||
// with the License. You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed
|
||||
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
|
||||
// the specific language governing permissions and limitations under the License.
|
||||
|
||||
type PCG64 struct {
|
||||
lo *PCG32
|
||||
hi *PCG32
|
||||
}
|
||||
|
||||
func NewPCG64() *PCG64 {
|
||||
return &PCG64{NewPCG32(), NewPCG32()}
|
||||
}
|
||||
|
||||
func (p *PCG64) Seed(state1, state2, sequence1, sequence2 uint64) *PCG64 {
|
||||
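// The two 32-bit streams need distinct sequences (increments); only the low
// 63 bits matter, so if they match, flip sequence2 to force the streams apart.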
mask := ^uint64(0) >> 1
|
||||
if sequence1&mask == sequence2&mask {
|
||||
sequence2 = ^sequence2
|
||||
}
|
||||
p.lo.Seed(state1, sequence1)
|
||||
p.hi.Seed(state2, sequence2)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PCG64) Random() uint64 {
|
||||
return uint64(p.hi.Random())<<32 | uint64(p.lo.Random())
|
||||
}
|
||||
|
||||
func (p *PCG64) Bounded(bound uint64) uint64 {
|
||||
if bound == 0 {
|
||||
return 0
|
||||
}
|
||||
threshold := -bound % bound
|
||||
for {
|
||||
r := p.Random()
|
||||
if r >= threshold {
|
||||
return r % bound
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *PCG64) Advance(delta uint64) *PCG64 {
|
||||
p.lo.Advance(delta)
|
||||
p.hi.Advance(delta)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *PCG64) Retreat(delta uint64) *PCG64 {
|
||||
return p.Advance(-delta)
|
||||
}
|
|
@ -0,0 +1,253 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
--------------------------------------------------
|
||||
SOFTWARE DISTRIBUTED WITH THRIFT:
|
||||
|
||||
The Apache Thrift software includes a number of subcomponents with
|
||||
separate copyright notices and license terms. Your use of the source
|
||||
code for the these subcomponents is subject to the terms and
|
||||
conditions of the following licenses.
|
||||
|
||||
--------------------------------------------------
|
||||
Portions of the following files are licensed under the MIT License:
|
||||
|
||||
lib/erl/src/Makefile.am
|
||||
|
||||
Please see doc/otp-base-license.txt for the full terms of this license.
|
||||
|
||||
--------------------------------------------------
|
||||
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
|
||||
|
||||
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
|
||||
#
|
||||
# Copying and distribution of this file, with or without
|
||||
# modification, are permitted in any medium without royalty provided
|
||||
# the copyright notice and this notice are preserved.
|
||||
|
||||
--------------------------------------------------
|
||||
For the compiler/cpp/src/thrift/md5.[ch] components:
|
||||
|
||||
/*
|
||||
Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved.
|
||||
|
||||
This software is provided 'as-is', without any express or implied
|
||||
warranty. In no event will the authors be held liable for any damages
|
||||
arising from the use of this software.
|
||||
|
||||
Permission is granted to anyone to use this software for any purpose,
|
||||
including commercial applications, and to alter it and redistribute it
|
||||
freely, subject to the following restrictions:
|
||||
|
||||
1. The origin of this software must not be misrepresented; you must not
|
||||
claim that you wrote the original software. If you use this software
|
||||
in a product, an acknowledgment in the product documentation would be
|
||||
appreciated but is not required.
|
||||
2. Altered source versions must be plainly marked as such, and must not be
|
||||
misrepresented as being the original software.
|
||||
3. This notice may not be removed or altered from any source distribution.
|
||||
|
||||
L. Peter Deutsch
|
||||
ghost@aladdin.com
|
||||
|
||||
*/
|
|
@ -0,0 +1,5 @@
|
|||
Apache Thrift
|
||||
Copyright 2006-2010 The Apache Software Foundation.
|
||||
|
||||
This product includes software developed at
|
||||
The Apache Software Foundation (http://www.apache.org/).
|
142
vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go
generated
vendored
Normal file
|
@ -0,0 +1,142 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
const (
|
||||
UNKNOWN_APPLICATION_EXCEPTION = 0
|
||||
UNKNOWN_METHOD = 1
|
||||
INVALID_MESSAGE_TYPE_EXCEPTION = 2
|
||||
WRONG_METHOD_NAME = 3
|
||||
BAD_SEQUENCE_ID = 4
|
||||
MISSING_RESULT = 5
|
||||
INTERNAL_ERROR = 6
|
||||
PROTOCOL_ERROR = 7
|
||||
)
|
||||
|
||||
// Application level Thrift exception
|
||||
type TApplicationException interface {
|
||||
TException
|
||||
TypeId() int32
|
||||
Read(iprot TProtocol) (TApplicationException, error)
|
||||
Write(oprot TProtocol) error
|
||||
}
|
||||
|
||||
type tApplicationException struct {
|
||||
message string
|
||||
type_ int32
|
||||
}
|
||||
|
||||
func (e tApplicationException) Error() string {
|
||||
return e.message
|
||||
}
|
||||
|
||||
func NewTApplicationException(type_ int32, message string) TApplicationException {
|
||||
return &tApplicationException{message, type_}
|
||||
}
|
||||
|
||||
func (p *tApplicationException) TypeId() int32 {
|
||||
return p.type_
|
||||
}
|
||||
|
||||
func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
|
||||
_, err := iprot.ReadStructBegin()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
message := ""
|
||||
type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
|
||||
|
||||
for {
|
||||
_, ttype, id, err := iprot.ReadFieldBegin()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ttype == STOP {
|
||||
break
|
||||
}
|
||||
switch id {
|
||||
case 1:
|
||||
if ttype == STRING {
|
||||
if message, err = iprot.ReadString(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if ttype == I32 {
|
||||
if type_, err = iprot.ReadI32(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
default:
|
||||
if err = SkipDefaultDepth(iprot, ttype); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err = iprot.ReadFieldEnd(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return NewTApplicationException(type_, message), iprot.ReadStructEnd()
|
||||
}
|
||||
|
||||
func (p *tApplicationException) Write(oprot TProtocol) (err error) {
|
||||
err = oprot.WriteStructBegin("TApplicationException")
|
||||
if len(p.Error()) > 0 {
|
||||
err = oprot.WriteFieldBegin("message", STRING, 1)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = oprot.WriteString(p.Error())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = oprot.WriteFieldEnd()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
err = oprot.WriteFieldBegin("type", I32, 2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = oprot.WriteI32(p.type_)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = oprot.WriteFieldEnd()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = oprot.WriteFieldStop()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = oprot.WriteStructEnd()
|
||||
return
|
||||
}
|
|
@ -0,0 +1,560 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// maxBytesPoolAlloc is the constant for how big the slices being allocated
|
||||
// from the bytes pool are; if the bytes required is larger than that, they should
|
||||
// not come from the pool.
|
||||
var maxBytesPoolAlloc = 1024
|
||||
|
||||
// SetMaxBytesPoolAlloc sets the max bytes that are pooled for binary thrift
|
||||
// fields and must be called before any thrift binary protocols are used
|
||||
// since it is a global and is not thread safe to edit.
|
||||
func SetMaxBytesPoolAlloc(size int) {
|
||||
maxBytesPoolAlloc = size
|
||||
}
|
||||
|
||||
// MaxBytesPoolAlloc returns the current max bytes that are pooled for binary
|
||||
// thrift fields.
|
||||
func MaxBytesPoolAlloc() int {
|
||||
return maxBytesPoolAlloc
|
||||
}
|
||||
|
||||
// BytesPoolPut is a public func for returning a pooled byte slice; each
|
||||
// pooled slice has capacity maxBytesPoolAlloc. TBinaryProtocol.ReadBinary
|
||||
// allocates from this pool if the size of the bytes to return is equal to or
|
||||
// less than maxBytesPoolAlloc.
|
||||
func BytesPoolPut(b []byte) bool {
|
||||
if cap(b) != maxBytesPoolAlloc {
|
||||
return false
|
||||
}
|
||||
element := bytesWrapperPool.Get().(*bytesWrapper)
|
||||
element.value = b
|
||||
bytesPool.Put(element)
|
||||
return true
|
||||
}
|
||||
|
||||
// BytesPoolGet returns a pooled byte slice of capacity BytesPoolAlloc.
|
||||
func BytesPoolGet() []byte {
|
||||
element := bytesPool.Get().(*bytesWrapper)
|
||||
result := element.value
|
||||
element.value = nil
|
||||
bytesWrapperPool.Put(element)
|
||||
return result
|
||||
}
|
||||
|
||||
// bytesWrapper is used to wrap a byte slice to avoid allocing a interface{}
|
||||
// when wrapping a byte slice which is usually passed on the stack
|
||||
type bytesWrapper struct {
|
||||
value []byte
|
||||
}
|
||||
|
||||
var bytesWrapperPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &bytesWrapper{}
|
||||
},
|
||||
}
|
||||
|
||||
var bytesPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
element := bytesWrapperPool.Get().(*bytesWrapper)
|
||||
element.value = make([]byte, maxBytesPoolAlloc)
|
||||
return element
|
||||
},
|
||||
}
|
||||
|
||||
type TBinaryProtocol struct {
|
||||
trans TRichTransport
|
||||
origTransport TTransport
|
||||
reader io.Reader
|
||||
writer io.Writer
|
||||
strictRead bool
|
||||
strictWrite bool
|
||||
buffer [64]byte
|
||||
}
|
||||
|
||||
type TBinaryProtocolFactory struct {
|
||||
strictRead bool
|
||||
strictWrite bool
|
||||
}
|
||||
|
||||
func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
|
||||
return NewTBinaryProtocol(t, false, true)
|
||||
}
|
||||
|
||||
func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
|
||||
p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
|
||||
if et, ok := t.(TRichTransport); ok {
|
||||
p.trans = et
|
||||
} else {
|
||||
p.trans = NewTRichTransport(t)
|
||||
}
|
||||
p.reader = p.trans
|
||||
p.writer = p.trans
|
||||
return p
|
||||
}
|
||||
|
||||
func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
|
||||
return NewTBinaryProtocolFactory(false, true)
|
||||
}
|
||||
|
||||
func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
|
||||
return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
|
||||
return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
|
||||
}
|
||||
|
||||
/**
|
||||
* Writing Methods
|
||||
*/
|
||||
|
||||
func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
|
||||
if p.strictWrite {
|
||||
version := uint32(VERSION_1) | uint32(typeId)
|
||||
e := p.WriteI32(int32(version))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteString(name)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(seqId)
|
||||
return e
|
||||
} else {
|
||||
e := p.WriteString(name)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteByte(int8(typeId))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(seqId)
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteMessageEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteStructBegin(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteStructEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
e := p.WriteByte(int8(typeId))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI16(id)
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteFieldEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteFieldStop() error {
|
||||
e := p.WriteByte(STOP)
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
e := p.WriteByte(int8(keyType))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteByte(int8(valueType))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(int32(size))
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteMapEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
e := p.WriteByte(int8(elemType))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(int32(size))
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteListEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
e := p.WriteByte(int8(elemType))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
e = p.WriteI32(int32(size))
|
||||
return e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteSetEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteBool(value bool) error {
|
||||
if value {
|
||||
return p.WriteByte(1)
|
||||
}
|
||||
return p.WriteByte(0)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteByte(value int8) error {
|
||||
e := p.trans.WriteByte(byte(value))
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteI16(value int16) error {
|
||||
v := p.buffer[0:2]
|
||||
binary.BigEndian.PutUint16(v, uint16(value))
|
||||
_, e := p.writer.Write(v)
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteI32(value int32) error {
|
||||
v := p.buffer[0:4]
|
||||
binary.BigEndian.PutUint32(v, uint32(value))
|
||||
_, e := p.writer.Write(v)
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteI64(value int64) error {
|
||||
v := p.buffer[0:8]
|
||||
binary.BigEndian.PutUint64(v, uint64(value))
|
||||
_, err := p.writer.Write(v)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteDouble(value float64) error {
|
||||
return p.WriteI64(int64(math.Float64bits(value)))
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteString(value string) error {
|
||||
e := p.WriteI32(int32(len(value)))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
_, err := p.trans.WriteString(value)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) WriteBinary(value []byte) error {
|
||||
e := p.WriteI32(int32(len(value)))
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
_, err := p.writer.Write(value)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
/**
|
||||
* Reading methods
|
||||
*/
|
||||
|
||||
func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
|
||||
size, e := p.ReadI32()
|
||||
if e != nil {
|
||||
return "", typeId, 0, NewTProtocolException(e)
|
||||
}
|
||||
if size < 0 {
|
||||
typeId = TMessageType(size & 0x0ff)
|
||||
version := int64(int64(size) & VERSION_MASK)
|
||||
if version != VERSION_1 {
|
||||
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
|
||||
}
|
||||
name, e = p.ReadString()
|
||||
if e != nil {
|
||||
return name, typeId, seqId, NewTProtocolException(e)
|
||||
}
|
||||
seqId, e = p.ReadI32()
|
||||
if e != nil {
|
||||
return name, typeId, seqId, NewTProtocolException(e)
|
||||
}
|
||||
return name, typeId, seqId, nil
|
||||
}
|
||||
if p.strictRead {
|
||||
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
|
||||
}
|
||||
name, e2 := p.readStringBody(size)
|
||||
if e2 != nil {
|
||||
return name, typeId, seqId, e2
|
||||
}
|
||||
b, e3 := p.ReadByte()
|
||||
if e3 != nil {
|
||||
return name, typeId, seqId, e3
|
||||
}
|
||||
typeId = TMessageType(b)
|
||||
seqId, e4 := p.ReadI32()
|
||||
if e4 != nil {
|
||||
return name, typeId, seqId, e4
|
||||
}
|
||||
return name, typeId, seqId, nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadMessageEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadStructEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
|
||||
t, err := p.ReadByte()
|
||||
typeId = TType(t)
|
||||
if err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
if t != STOP {
|
||||
seqId, err = p.ReadI16()
|
||||
}
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadFieldEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
|
||||
|
||||
func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
|
||||
k, e := p.ReadByte()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
kType = TType(k)
|
||||
v, e := p.ReadByte()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
vType = TType(v)
|
||||
size32, e := p.ReadI32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size32 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size32)
|
||||
return kType, vType, size, nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadMapEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
|
||||
b, e := p.ReadByte()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
elemType = TType(b)
|
||||
size32, e := p.ReadI32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size32 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size32)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadListEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
|
||||
b, e := p.ReadByte()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
elemType = TType(b)
|
||||
size32, e := p.ReadI32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size32 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size32)
|
||||
return elemType, size, nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadSetEnd() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadBool() (bool, error) {
|
||||
b, e := p.ReadByte()
|
||||
v := true
|
||||
if b != 1 {
|
||||
v = false
|
||||
}
|
||||
return v, e
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadByte() (int8, error) {
|
||||
v, err := p.trans.ReadByte()
|
||||
return int8(v), err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
|
||||
buf := p.buffer[0:2]
|
||||
err = p.readAll(buf)
|
||||
value = int16(binary.BigEndian.Uint16(buf))
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
|
||||
buf := p.buffer[0:4]
|
||||
err = p.readAll(buf)
|
||||
value = int32(binary.BigEndian.Uint32(buf))
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
|
||||
buf := p.buffer[0:8]
|
||||
err = p.readAll(buf)
|
||||
value = int64(binary.BigEndian.Uint64(buf))
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
|
||||
buf := p.buffer[0:8]
|
||||
err = p.readAll(buf)
|
||||
value = math.Float64frombits(binary.BigEndian.Uint64(buf))
|
||||
return value, err
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadString() (value string, err error) {
|
||||
size, e := p.ReadI32()
|
||||
if e != nil {
|
||||
return "", e
|
||||
}
|
||||
if size < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
|
||||
return p.readStringBody(size)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
|
||||
size, e := p.ReadI32()
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
if size < 0 {
|
||||
return nil, invalidDataLength
|
||||
}
|
||||
if uint64(size) > p.trans.RemainingBytes() {
|
||||
return nil, invalidDataLength
|
||||
}
|
||||
|
||||
isize := int(size)
|
||||
|
||||
var buf []byte
|
||||
if isize <= maxBytesPoolAlloc {
|
||||
buf = BytesPoolGet()[:isize]
|
||||
} else {
|
||||
buf = make([]byte, isize)
|
||||
}
|
||||
|
||||
_, err := io.ReadFull(p.trans, buf)
|
||||
return buf, NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) Flush() (err error) {
|
||||
return NewTProtocolException(p.trans.Flush())
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
|
||||
return SkipDefaultDepth(p, fieldType)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) Transport() TTransport {
|
||||
return p.origTransport
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) readAll(buf []byte) error {
|
||||
_, err := io.ReadFull(p.reader, buf)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
|
||||
if size < 0 {
|
||||
return "", nil
|
||||
}
|
||||
if uint64(size) > p.trans.RemainingBytes() {
|
||||
return "", invalidDataLength
|
||||
}
|
||||
var buf []byte
|
||||
if int(size) <= len(p.buffer) {
|
||||
buf = p.buffer[0:size]
|
||||
} else {
|
||||
buf = make([]byte, size)
|
||||
}
|
||||
_, e := io.ReadFull(p.trans, buf)
|
||||
return string(buf), NewTProtocolException(e)
|
||||
}
|
75
vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go
generated
vendored
Normal file
75
vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
)
|
||||
|
||||
type TBufferedTransportFactory struct {
|
||||
size int
|
||||
}
|
||||
|
||||
type TBufferedTransport struct {
|
||||
bufio.ReadWriter
|
||||
tp TTransport
|
||||
}
|
||||
|
||||
func (p *TBufferedTransportFactory) GetTransport(trans TTransport) TTransport {
|
||||
return NewTBufferedTransport(trans, p.size)
|
||||
}
|
||||
|
||||
func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
|
||||
return &TBufferedTransportFactory{size: bufferSize}
|
||||
}
|
||||
|
||||
func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
|
||||
return &TBufferedTransport{
|
||||
ReadWriter: bufio.ReadWriter{
|
||||
Reader: bufio.NewReaderSize(trans, bufferSize),
|
||||
Writer: bufio.NewWriterSize(trans, bufferSize),
|
||||
},
|
||||
tp: trans,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) IsOpen() bool {
|
||||
return p.tp.IsOpen()
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) Open() (err error) {
|
||||
return p.tp.Open()
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) Close() (err error) {
|
||||
return p.tp.Close()
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) Flush() error {
|
||||
if err := p.ReadWriter.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return p.tp.Flush()
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
|
||||
return p.tp.RemainingBytes()
|
||||
}
|
||||
|
815
vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go
generated
vendored
Normal file
815
vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go
generated
vendored
Normal file
|
@ -0,0 +1,815 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
)
|
||||
|
||||
const (
|
||||
COMPACT_PROTOCOL_ID = 0x082
|
||||
COMPACT_VERSION = 1
|
||||
COMPACT_VERSION_MASK = 0x1f
|
||||
COMPACT_TYPE_MASK = 0x0E0
|
||||
COMPACT_TYPE_BITS = 0x07
|
||||
COMPACT_TYPE_SHIFT_AMOUNT = 5
|
||||
)
|
||||
|
||||
type tCompactType byte
|
||||
|
||||
const (
|
||||
COMPACT_BOOLEAN_TRUE = 0x01
|
||||
COMPACT_BOOLEAN_FALSE = 0x02
|
||||
COMPACT_BYTE = 0x03
|
||||
COMPACT_I16 = 0x04
|
||||
COMPACT_I32 = 0x05
|
||||
COMPACT_I64 = 0x06
|
||||
COMPACT_DOUBLE = 0x07
|
||||
COMPACT_BINARY = 0x08
|
||||
COMPACT_LIST = 0x09
|
||||
COMPACT_SET = 0x0A
|
||||
COMPACT_MAP = 0x0B
|
||||
COMPACT_STRUCT = 0x0C
|
||||
)
|
||||
|
||||
var (
|
||||
ttypeToCompactType map[TType]tCompactType
|
||||
)
|
||||
|
||||
func init() {
|
||||
ttypeToCompactType = map[TType]tCompactType{
|
||||
STOP: STOP,
|
||||
BOOL: COMPACT_BOOLEAN_TRUE,
|
||||
BYTE: COMPACT_BYTE,
|
||||
I16: COMPACT_I16,
|
||||
I32: COMPACT_I32,
|
||||
I64: COMPACT_I64,
|
||||
DOUBLE: COMPACT_DOUBLE,
|
||||
STRING: COMPACT_BINARY,
|
||||
LIST: COMPACT_LIST,
|
||||
SET: COMPACT_SET,
|
||||
MAP: COMPACT_MAP,
|
||||
STRUCT: COMPACT_STRUCT,
|
||||
}
|
||||
}
|
||||
|
||||
type TCompactProtocolFactory struct{}
|
||||
|
||||
func NewTCompactProtocolFactory() *TCompactProtocolFactory {
|
||||
return &TCompactProtocolFactory{}
|
||||
}
|
||||
|
||||
func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
|
||||
return NewTCompactProtocol(trans)
|
||||
}
|
||||
|
||||
type TCompactProtocol struct {
|
||||
trans TRichTransport
|
||||
origTransport TTransport
|
||||
|
||||
// Used to keep track of the last field for the current and previous structs,
|
||||
// so we can do the delta stuff.
|
||||
lastField []int
|
||||
lastFieldId int
|
||||
|
||||
// If we encounter a boolean field begin, save the TField here so it can
|
||||
// have the value incorporated.
|
||||
booleanFieldName string
|
||||
booleanFieldId int16
|
||||
booleanFieldPending bool
|
||||
|
||||
// If we read a field header, and it's a boolean field, save the boolean
|
||||
// value here so that readBool can use it.
|
||||
boolValue bool
|
||||
boolValueIsNotNull bool
|
||||
buffer [64]byte
|
||||
}
|
||||
|
||||
// Create a TCompactProtocol given a TTransport
|
||||
func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
|
||||
p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
|
||||
if et, ok := trans.(TRichTransport); ok {
|
||||
p.trans = et
|
||||
} else {
|
||||
p.trans = NewTRichTransport(trans)
|
||||
}
|
||||
|
||||
return p
|
||||
|
||||
}
|
||||
|
||||
//
|
||||
// Public Writing methods.
|
||||
//
|
||||
|
||||
// Write a message header to the wire. Compact Protocol messages contain the
|
||||
// protocol version so we can migrate forwards in the future if need be.
|
||||
func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
|
||||
err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
|
||||
if err != nil {
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
|
||||
if err != nil {
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
_, err = p.writeVarint32(seqid)
|
||||
if err != nil {
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
e := p.WriteString(name)
|
||||
return e
|
||||
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
|
||||
|
||||
// Write a struct begin. This doesn't actually put anything on the wire. We
|
||||
// use it as an opportunity to put special placeholder markers on the field
|
||||
// stack so we can get the field id deltas correct.
|
||||
func (p *TCompactProtocol) WriteStructBegin(name string) error {
|
||||
p.lastField = append(p.lastField, p.lastFieldId)
|
||||
p.lastFieldId = 0
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write a struct end. This doesn't actually put anything on the wire. We use
|
||||
// this as an opportunity to pop the last field from the current struct off
|
||||
// of the field stack.
|
||||
func (p *TCompactProtocol) WriteStructEnd() error {
|
||||
p.lastFieldId = p.lastField[len(p.lastField)-1]
|
||||
p.lastField = p.lastField[:len(p.lastField)-1]
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
if typeId == BOOL {
|
||||
// we want to possibly include the value, so we'll wait.
|
||||
p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
|
||||
return nil
|
||||
}
|
||||
_, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// The workhorse of writeFieldBegin. It has the option of doing a
|
||||
// 'type override' of the type header. This is used specifically in the
|
||||
// boolean field case.
|
||||
func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
|
||||
// short lastField = lastField_.pop();
|
||||
|
||||
// if there's a type override, use that.
|
||||
var typeToWrite byte
|
||||
if typeOverride == 0xFF {
|
||||
typeToWrite = byte(p.getCompactType(typeId))
|
||||
} else {
|
||||
typeToWrite = typeOverride
|
||||
}
|
||||
// check if we can use delta encoding for the field id
|
||||
fieldId := int(id)
|
||||
written := 0
|
||||
if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
|
||||
// write them together
|
||||
err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
// write them separate
|
||||
err := p.writeByteDirect(typeToWrite)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = p.WriteI16(id)
|
||||
written = 1 + 2
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
p.lastFieldId = fieldId
|
||||
// p.lastField.Push(field.id);
|
||||
return written, nil
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
|
||||
|
||||
func (p *TCompactProtocol) WriteFieldStop() error {
|
||||
err := p.writeByteDirect(STOP)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
if size == 0 {
|
||||
err := p.writeByteDirect(0)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
_, err := p.writeVarint32(int32(size))
|
||||
if err != nil {
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteMapEnd() error { return nil }
|
||||
|
||||
// Write a list header.
|
||||
func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
_, err := p.writeCollectionBegin(elemType, size)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteListEnd() error { return nil }
|
||||
|
||||
// Write a set header.
|
||||
func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
_, err := p.writeCollectionBegin(elemType, size)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) WriteSetEnd() error { return nil }
|
||||
|
||||
func (p *TCompactProtocol) WriteBool(value bool) error {
|
||||
v := byte(COMPACT_BOOLEAN_FALSE)
|
||||
if value {
|
||||
v = byte(COMPACT_BOOLEAN_TRUE)
|
||||
}
|
||||
if p.booleanFieldPending {
|
||||
// we haven't written the field header yet
|
||||
_, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
|
||||
p.booleanFieldPending = false
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
// we're not part of a field, so just write the value.
|
||||
err := p.writeByteDirect(v)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write a byte. Nothing to see here!
|
||||
func (p *TCompactProtocol) WriteByte(value int8) error {
|
||||
err := p.writeByteDirect(byte(value))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write an I16 as a zigzag varint.
|
||||
func (p *TCompactProtocol) WriteI16(value int16) error {
|
||||
_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write an i32 as a zigzag varint.
|
||||
func (p *TCompactProtocol) WriteI32(value int32) error {
|
||||
_, err := p.writeVarint32(p.int32ToZigzag(value))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write an i64 as a zigzag varint.
|
||||
func (p *TCompactProtocol) WriteI64(value int64) error {
|
||||
_, err := p.writeVarint64(p.int64ToZigzag(value))
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write a double to the wire as 8 bytes.
|
||||
func (p *TCompactProtocol) WriteDouble(value float64) error {
|
||||
buf := p.buffer[0:8]
|
||||
binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
|
||||
_, err := p.trans.Write(buf)
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
// Write a string to the wire with a varint size preceding.
|
||||
func (p *TCompactProtocol) WriteString(value string) error {
|
||||
_, e := p.writeVarint32(int32(len(value)))
|
||||
if e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if len(value) > 0 {
|
||||
}
|
||||
_, e = p.trans.WriteString(value)
|
||||
return e
|
||||
}
|
||||
|
||||
// Write a byte array, using a varint for the size.
|
||||
func (p *TCompactProtocol) WriteBinary(bin []byte) error {
|
||||
_, e := p.writeVarint32(int32(len(bin)))
|
||||
if e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if len(bin) > 0 {
|
||||
_, e = p.trans.Write(bin)
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//
|
||||
// Reading methods.
|
||||
//
|
||||
|
||||
// Read a message header.
|
||||
func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
|
||||
|
||||
protocolId, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if protocolId != COMPACT_PROTOCOL_ID {
|
||||
e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
|
||||
return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
|
||||
}
|
||||
|
||||
versionAndType, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
version := versionAndType & COMPACT_VERSION_MASK
|
||||
typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
|
||||
if version != COMPACT_VERSION {
|
||||
e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
|
||||
err = NewTProtocolExceptionWithType(BAD_VERSION, e)
|
||||
return
|
||||
}
|
||||
seqId, e := p.readVarint32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
name, err = p.ReadString()
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
|
||||
|
||||
// Read a struct begin. There's nothing on the wire for this, but it is our
|
||||
// opportunity to push a new struct begin marker onto the field stack.
|
||||
func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
|
||||
p.lastField = append(p.lastField, p.lastFieldId)
|
||||
p.lastFieldId = 0
|
||||
return
|
||||
}
|
||||
|
||||
// Doesn't actually consume any wire data, just removes the last field for
|
||||
// this struct from the field stack.
|
||||
func (p *TCompactProtocol) ReadStructEnd() error {
|
||||
// consume the last field we read off the wire.
|
||||
p.lastFieldId = p.lastField[len(p.lastField)-1]
|
||||
p.lastField = p.lastField[:len(p.lastField)-1]
|
||||
return nil
|
||||
}
|
||||
|
||||
// Read a field header off the wire.
|
||||
func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
|
||||
t, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// if it's a stop, then we can return immediately, as the struct is over.
|
||||
if (t & 0x0f) == STOP {
|
||||
return "", STOP, 0, nil
|
||||
}
|
||||
|
||||
// mask off the 4 MSB of the type header. it could contain a field id delta.
|
||||
modifier := int16((t & 0xf0) >> 4)
|
||||
if modifier == 0 {
|
||||
// not a delta. look ahead for the zigzag varint field id.
|
||||
id, err = p.ReadI16()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// has a delta. add the delta to the last read field id.
|
||||
id = int16(p.lastFieldId) + modifier
|
||||
}
|
||||
typeId, e := p.getTType(tCompactType(t & 0x0f))
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
|
||||
// if this happens to be a boolean field, the value is encoded in the type
|
||||
if p.isBoolType(t) {
|
||||
// save the boolean value in a special instance variable.
|
||||
p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
|
||||
p.boolValueIsNotNull = true
|
||||
}
|
||||
|
||||
// push the new field onto the field stack so we can keep the deltas going.
|
||||
p.lastFieldId = int(id)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
|
||||
|
||||
// Read a map header off the wire. If the size is zero, skip reading the key
|
||||
// and value type. This means that 0-length maps will yield TMaps without the
|
||||
// "correct" types.
|
||||
func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
|
||||
size32, e := p.readVarint32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size32 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size32)
|
||||
|
||||
keyAndValueType := byte(STOP)
|
||||
if size != 0 {
|
||||
keyAndValueType, err = p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
|
||||
valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) ReadMapEnd() error { return nil }
|
||||
|
||||
// Read a list header off the wire. If the list size is 0-14, the size will
|
||||
// be packed into the element type header. If it's a longer list, the 4 MSB
|
||||
// of the element type header will be 0xF, and a varint will follow with the
|
||||
// true size.
|
||||
func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
|
||||
size_and_type, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
size = int((size_and_type >> 4) & 0x0f)
|
||||
if size == 15 {
|
||||
size2, e := p.readVarint32()
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
if size2 < 0 {
|
||||
err = invalidDataLength
|
||||
return
|
||||
}
|
||||
size = int(size2)
|
||||
}
|
||||
elemType, e := p.getTType(tCompactType(size_and_type))
|
||||
if e != nil {
|
||||
err = NewTProtocolException(e)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) ReadListEnd() error { return nil }
|
||||
|
||||
// Read a set header off the wire. If the set size is 0-14, the size will
|
||||
// be packed into the element type header. If it's a longer set, the 4 MSB
|
||||
// of the element type header will be 0xF, and a varint will follow with the
|
||||
// true size.
|
||||
func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
|
||||
return p.ReadListBegin()
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) ReadSetEnd() error { return nil }
|
||||
|
||||
// Read a boolean off the wire. If this is a boolean field, the value should
|
||||
// already have been read during readFieldBegin, so we'll just consume the
|
||||
// pre-stored value. Otherwise, read a byte.
|
||||
func (p *TCompactProtocol) ReadBool() (value bool, err error) {
|
||||
if p.boolValueIsNotNull {
|
||||
p.boolValueIsNotNull = false
|
||||
return p.boolValue, nil
|
||||
}
|
||||
v, err := p.readByteDirect()
|
||||
return v == COMPACT_BOOLEAN_TRUE, err
|
||||
}
|
||||
|
||||
// Read a single byte off the wire. Nothing interesting here.
|
||||
func (p *TCompactProtocol) ReadByte() (int8, error) {
|
||||
v, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return 0, NewTProtocolException(err)
|
||||
}
|
||||
return int8(v), err
|
||||
}
|
||||
|
||||
// Read an i16 from the wire as a zigzag varint.
|
||||
func (p *TCompactProtocol) ReadI16() (value int16, err error) {
|
||||
v, err := p.ReadI32()
|
||||
return int16(v), err
|
||||
}
|
||||
|
||||
// Read an i32 from the wire as a zigzag varint.
|
||||
func (p *TCompactProtocol) ReadI32() (value int32, err error) {
|
||||
v, e := p.readVarint32()
|
||||
if e != nil {
|
||||
return 0, NewTProtocolException(e)
|
||||
}
|
||||
value = p.zigzagToInt32(v)
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Read an i64 from the wire as a zigzag varint.
|
||||
func (p *TCompactProtocol) ReadI64() (value int64, err error) {
|
||||
v, e := p.readVarint64()
|
||||
if e != nil {
|
||||
return 0, NewTProtocolException(e)
|
||||
}
|
||||
value = p.zigzagToInt64(v)
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// No magic here - just read a double off the wire.
|
||||
func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
|
||||
longBits := p.buffer[0:8]
|
||||
_, e := io.ReadFull(p.trans, longBits)
|
||||
if e != nil {
|
||||
return 0.0, NewTProtocolException(e)
|
||||
}
|
||||
return math.Float64frombits(p.bytesToUint64(longBits)), nil
|
||||
}
|
||||
|
||||
// Reads a []byte (via readBinary), and then UTF-8 decodes it.
|
||||
func (p *TCompactProtocol) ReadString() (value string, err error) {
|
||||
length, e := p.readVarint32()
|
||||
if e != nil {
|
||||
return "", NewTProtocolException(e)
|
||||
}
|
||||
if length < 0 {
|
||||
return "", invalidDataLength
|
||||
}
|
||||
if uint64(length) > p.trans.RemainingBytes() {
|
||||
return "", invalidDataLength
|
||||
}
|
||||
|
||||
if length == 0 {
|
||||
return "", nil
|
||||
}
|
||||
var buf []byte
|
||||
if length <= int32(len(p.buffer)) {
|
||||
buf = p.buffer[0:length]
|
||||
} else {
|
||||
buf = make([]byte, length)
|
||||
}
|
||||
_, e = io.ReadFull(p.trans, buf)
|
||||
return string(buf), NewTProtocolException(e)
|
||||
}
|
||||
|
||||
// Read a []byte from the wire.
|
||||
func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
|
||||
length, e := p.readVarint32()
|
||||
if e != nil {
|
||||
return nil, NewTProtocolException(e)
|
||||
}
|
||||
if length == 0 {
|
||||
return []byte{}, nil
|
||||
}
|
||||
if length < 0 {
|
||||
return nil, invalidDataLength
|
||||
}
|
||||
if uint64(length) > p.trans.RemainingBytes() {
|
||||
return nil, invalidDataLength
|
||||
}
|
||||
|
||||
buf := make([]byte, length)
|
||||
_, e = io.ReadFull(p.trans, buf)
|
||||
return buf, NewTProtocolException(e)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) Flush() (err error) {
|
||||
return NewTProtocolException(p.trans.Flush())
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
|
||||
return SkipDefaultDepth(p, fieldType)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) Transport() TTransport {
|
||||
return p.origTransport
|
||||
}
|
||||
|
||||
//
|
||||
// Internal writing methods
|
||||
//
|
||||
|
||||
// Abstract method for writing the start of lists and sets. List and sets on
|
||||
// the wire differ only by the type indicator.
|
||||
func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
|
||||
if size <= 14 {
|
||||
return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
|
||||
}
|
||||
err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
m, err := p.writeVarint32(int32(size))
|
||||
return 1 + m, err
|
||||
}
|
||||
|
||||
// Write an i32 as a varint. Results in 1-5 bytes on the wire.
|
||||
// TODO(pomack): make a permanent buffer like writeVarint64?
|
||||
func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
|
||||
i32buf := p.buffer[0:5]
|
||||
idx := 0
|
||||
for {
|
||||
if (n & ^0x7F) == 0 {
|
||||
i32buf[idx] = byte(n)
|
||||
idx++
|
||||
// p.writeByteDirect(byte(n));
|
||||
break
|
||||
// return;
|
||||
} else {
|
||||
i32buf[idx] = byte((n & 0x7F) | 0x80)
|
||||
idx++
|
||||
// p.writeByteDirect(byte(((n & 0x7F) | 0x80)));
|
||||
u := uint32(n)
|
||||
n = int32(u >> 7)
|
||||
}
|
||||
}
|
||||
return p.trans.Write(i32buf[0:idx])
|
||||
}
|
||||
|
||||
// Write an i64 as a varint. Results in 1-10 bytes on the wire.
|
||||
func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
|
||||
varint64out := p.buffer[0:10]
|
||||
idx := 0
|
||||
for {
|
||||
if (n & ^0x7F) == 0 {
|
||||
varint64out[idx] = byte(n)
|
||||
idx++
|
||||
break
|
||||
} else {
|
||||
varint64out[idx] = byte((n & 0x7F) | 0x80)
|
||||
idx++
|
||||
u := uint64(n)
|
||||
n = int64(u >> 7)
|
||||
}
|
||||
}
|
||||
return p.trans.Write(varint64out[0:idx])
|
||||
}
|
||||
|
||||
// Convert l into a zigzag long. This allows negative numbers to be
|
||||
// represented compactly as a varint.
|
||||
func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
|
||||
return (l << 1) ^ (l >> 63)
|
||||
}
|
||||
|
||||
// Convert l into a zigzag long. This allows negative numbers to be
|
||||
// represented compactly as a varint.
|
||||
func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
|
||||
return (n << 1) ^ (n >> 31)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
|
||||
binary.LittleEndian.PutUint64(buf, n)
|
||||
}
|
||||
|
||||
func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
|
||||
binary.LittleEndian.PutUint64(buf, uint64(n))
|
||||
}
|
||||
|
||||
// Writes a byte without any possibility of all that field header nonsense.
|
||||
// Used internally by other writing methods that know they need to write a byte.
|
||||
func (p *TCompactProtocol) writeByteDirect(b byte) error {
|
||||
return p.trans.WriteByte(b)
|
||||
}
|
||||
|
||||
// Writes a byte without any possibility of all that field header nonsense.
|
||||
func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
|
||||
return 1, p.writeByteDirect(byte(n))
|
||||
}
|
||||
|
||||
//
|
||||
// Internal reading methods
|
||||
//
|
||||
|
||||
// Read an i32 from the wire as a varint. The MSB of each byte is set
|
||||
// if there is another byte to follow. This can read up to 5 bytes.
|
||||
func (p *TCompactProtocol) readVarint32() (int32, error) {
|
||||
// if the wire contains the right stuff, this will just truncate the i64 we
|
||||
// read and get us the right sign.
|
||||
v, err := p.readVarint64()
|
||||
return int32(v), err
|
||||
}
|
||||
|
||||
// Read an i64 from the wire as a proper varint. The MSB of each byte is set
|
||||
// if there is another byte to follow. This can read up to 10 bytes.
|
||||
func (p *TCompactProtocol) readVarint64() (int64, error) {
|
||||
shift := uint(0)
|
||||
result := int64(0)
|
||||
for {
|
||||
b, err := p.readByteDirect()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
result |= int64(b&0x7f) << shift
|
||||
if (b & 0x80) != 0x80 {
|
||||
break
|
||||
}
|
||||
shift += 7
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Read a byte, unlike ReadByte that reads Thrift-byte that is i8.
|
||||
func (p *TCompactProtocol) readByteDirect() (byte, error) {
|
||||
return p.trans.ReadByte()
|
||||
}
|
||||
|
||||
//
|
||||
// encoding helpers
|
||||
//
|
||||
|
||||
// Convert from zigzag int to int.
|
||||
func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
|
||||
u := uint32(n)
|
||||
return int32(u>>1) ^ -(n & 1)
|
||||
}
|
||||
|
||||
// Convert from zigzag long to long.
|
||||
func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
|
||||
u := uint64(n)
|
||||
return int64(u>>1) ^ -(n & 1)
|
||||
}
|
||||
|
||||
// Note that it's important that the mask bytes are long literals,
|
||||
// otherwise they'll default to ints, and when you shift an int left 56 bits,
|
||||
// you just get a messed up int.
|
||||
func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
|
||||
return int64(binary.LittleEndian.Uint64(b))
|
||||
}
|
||||
|
||||
// Note that it's important that the mask bytes are long literals,
|
||||
// otherwise they'll default to ints, and when you shift an int left 56 bits,
|
||||
// you just get a messed up int.
|
||||
func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
|
||||
return binary.LittleEndian.Uint64(b)
|
||||
}
|
||||
|
||||
//
|
||||
// type testing and converting
|
||||
//
|
||||
|
||||
func (p *TCompactProtocol) isBoolType(b byte) bool {
|
||||
return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
|
||||
}
|
||||
|
||||
// Given a tCompactType constant, convert it to its corresponding
|
||||
// TType value.
|
||||
func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
|
||||
switch byte(t) & 0x0f {
|
||||
case STOP:
|
||||
return STOP, nil
|
||||
case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
|
||||
return BOOL, nil
|
||||
case COMPACT_BYTE:
|
||||
return BYTE, nil
|
||||
case COMPACT_I16:
|
||||
return I16, nil
|
||||
case COMPACT_I32:
|
||||
return I32, nil
|
||||
case COMPACT_I64:
|
||||
return I64, nil
|
||||
case COMPACT_DOUBLE:
|
||||
return DOUBLE, nil
|
||||
case COMPACT_BINARY:
|
||||
return STRING, nil
|
||||
case COMPACT_LIST:
|
||||
return LIST, nil
|
||||
case COMPACT_SET:
|
||||
return SET, nil
|
||||
case COMPACT_MAP:
|
||||
return MAP, nil
|
||||
case COMPACT_STRUCT:
|
||||
return STRUCT, nil
|
||||
}
|
||||
return STOP, TException(fmt.Errorf("don't know what type: %s", t&0x0f))
|
||||
}
|
||||
|
||||
// Given a TType value, find the appropriate TCompactProtocol.Types constant.
|
||||
func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
|
||||
return ttypeToCompactType[t]
|
||||
}
|
|
@ -0,0 +1,269 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"log"
|
||||
)
|
||||
|
||||
type TDebugProtocol struct {
|
||||
Delegate TProtocol
|
||||
LogPrefix string
|
||||
}
|
||||
|
||||
type TDebugProtocolFactory struct {
|
||||
Underlying TProtocolFactory
|
||||
LogPrefix string
|
||||
}
|
||||
|
||||
func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
|
||||
return &TDebugProtocolFactory{
|
||||
Underlying: underlying,
|
||||
LogPrefix: logPrefix,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
|
||||
return &TDebugProtocol{
|
||||
Delegate: t.Underlying.GetProtocol(trans),
|
||||
LogPrefix: t.LogPrefix,
|
||||
}
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
|
||||
err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
|
||||
log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMessageEnd() error {
|
||||
err := tdp.Delegate.WriteMessageEnd()
|
||||
log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
|
||||
err := tdp.Delegate.WriteStructBegin(name)
|
||||
log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteStructEnd() error {
|
||||
err := tdp.Delegate.WriteStructEnd()
|
||||
log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
|
||||
log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldEnd() error {
|
||||
err := tdp.Delegate.WriteFieldEnd()
|
||||
log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldStop() error {
|
||||
err := tdp.Delegate.WriteFieldStop()
|
||||
log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
|
||||
log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMapEnd() error {
|
||||
err := tdp.Delegate.WriteMapEnd()
|
||||
log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
err := tdp.Delegate.WriteListBegin(elemType, size)
|
||||
log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteListEnd() error {
|
||||
err := tdp.Delegate.WriteListEnd()
|
||||
log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
err := tdp.Delegate.WriteSetBegin(elemType, size)
|
||||
log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteSetEnd() error {
|
||||
err := tdp.Delegate.WriteSetEnd()
|
||||
log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteBool(value bool) error {
|
||||
err := tdp.Delegate.WriteBool(value)
|
||||
log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteByte(value int8) error {
|
||||
err := tdp.Delegate.WriteByte(value)
|
||||
log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI16(value int16) error {
|
||||
err := tdp.Delegate.WriteI16(value)
|
||||
log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI32(value int32) error {
|
||||
err := tdp.Delegate.WriteI32(value)
|
||||
log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI64(value int64) error {
|
||||
err := tdp.Delegate.WriteI64(value)
|
||||
log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteDouble(value float64) error {
|
||||
err := tdp.Delegate.WriteDouble(value)
|
||||
log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteString(value string) error {
|
||||
err := tdp.Delegate.WriteString(value)
|
||||
log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
|
||||
err := tdp.Delegate.WriteBinary(value)
|
||||
log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
|
||||
name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
|
||||
log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
|
||||
err = tdp.Delegate.ReadMessageEnd()
|
||||
log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
|
||||
name, err = tdp.Delegate.ReadStructBegin()
|
||||
log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
|
||||
err = tdp.Delegate.ReadStructEnd()
|
||||
log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
|
||||
name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
|
||||
log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
|
||||
err = tdp.Delegate.ReadFieldEnd()
|
||||
log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
|
||||
keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
|
||||
log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
|
||||
err = tdp.Delegate.ReadMapEnd()
|
||||
log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
|
||||
elemType, size, err = tdp.Delegate.ReadListBegin()
|
||||
log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadListEnd() (err error) {
|
||||
err = tdp.Delegate.ReadListEnd()
|
||||
log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
|
||||
elemType, size, err = tdp.Delegate.ReadSetBegin()
|
||||
log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadSetEnd() (err error) {
|
||||
err = tdp.Delegate.ReadSetEnd()
|
||||
log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadBool() (value bool, err error) {
|
||||
value, err = tdp.Delegate.ReadBool()
|
||||
log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadByte() (value int8, err error) {
|
||||
value, err = tdp.Delegate.ReadByte()
|
||||
log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI16() (value int16, err error) {
|
||||
value, err = tdp.Delegate.ReadI16()
|
||||
log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI32() (value int32, err error) {
|
||||
value, err = tdp.Delegate.ReadI32()
|
||||
log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI64() (value int64, err error) {
|
||||
value, err = tdp.Delegate.ReadI64()
|
||||
log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) {
|
||||
value, err = tdp.Delegate.ReadDouble()
|
||||
log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadString() (value string, err error) {
|
||||
value, err = tdp.Delegate.ReadString()
|
||||
log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) {
|
||||
value, err = tdp.Delegate.ReadBinary()
|
||||
log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
|
||||
err = tdp.Delegate.Skip(fieldType)
|
||||
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) Flush() (err error) {
|
||||
err = tdp.Delegate.Flush()
|
||||
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) Transport() TTransport {
|
||||
return tdp.Delegate.Transport()
|
||||
}
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
type TDeserializer struct {
|
||||
Transport TTransport
|
||||
Protocol TProtocol
|
||||
}
|
||||
|
||||
func NewTDeserializer() *TDeserializer {
|
||||
var transport TTransport
|
||||
transport = NewTMemoryBufferLen(1024)
|
||||
|
||||
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
|
||||
|
||||
return &TDeserializer{
|
||||
transport,
|
||||
protocol}
|
||||
}
|
||||
|
||||
func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) {
|
||||
err = nil
|
||||
if _, err = t.Transport.Write([]byte(s)); err != nil {
|
||||
return
|
||||
}
|
||||
if err = msg.Read(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) {
|
||||
err = nil
|
||||
if _, err = t.Transport.Write(b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = msg.Read(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Generic Thrift exception
|
||||
type TException interface {
|
||||
error
|
||||
}
|
||||
|
||||
// Prepends additional information to an error without losing the Thrift exception interface
|
||||
func PrependError(prepend string, err error) error {
|
||||
if t, ok := err.(TTransportException); ok {
|
||||
return NewTTransportException(t.TypeId(), prepend+t.Error())
|
||||
}
|
||||
if t, ok := err.(TProtocolException); ok {
|
||||
return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
|
||||
}
|
||||
if t, ok := err.(TApplicationException); ok {
|
||||
return NewTApplicationException(t.TypeId(), prepend+t.Error())
|
||||
}
|
||||
|
||||
return errors.New(prepend + err.Error())
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// Helper class that encapsulates field metadata.
|
||||
type field struct {
|
||||
name string
|
||||
typeId TType
|
||||
id int
|
||||
}
|
||||
|
||||
func newField(n string, t TType, i int) *field {
|
||||
return &field{name: n, typeId: t, id: i}
|
||||
}
|
||||
|
||||
func (p *field) Name() string {
|
||||
if p == nil {
|
||||
return ""
|
||||
}
|
||||
return p.name
|
||||
}
|
||||
|
||||
func (p *field) TypeId() TType {
|
||||
if p == nil {
|
||||
return TType(VOID)
|
||||
}
|
||||
return p.typeId
|
||||
}
|
||||
|
||||
func (p *field) Id() int {
|
||||
if p == nil {
|
||||
return -1
|
||||
}
|
||||
return p.id
|
||||
}
|
||||
|
||||
func (p *field) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return "<TField name:'" + p.name + "' type:" + string(p.typeId) + " field-id:" + string(p.id) + ">"
|
||||
}
|
||||
|
||||
var ANONYMOUS_FIELD *field
|
||||
|
||||
type fieldSlice []field
|
||||
|
||||
func (p fieldSlice) Len() int {
|
||||
return len(p)
|
||||
}
|
||||
|
||||
func (p fieldSlice) Less(i, j int) bool {
|
||||
return p[i].Id() < p[j].Id()
|
||||
}
|
||||
|
||||
func (p fieldSlice) Swap(i, j int) {
|
||||
p[i], p[j] = p[j], p[i]
|
||||
}
|
||||
|
||||
func init() {
|
||||
ANONYMOUS_FIELD = newField("", STOP, 0)
|
||||
}
|
167
vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go
generated
vendored
Normal file
167
vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go
generated
vendored
Normal file
|
@ -0,0 +1,167 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const DEFAULT_MAX_LENGTH = 16384000
|
||||
|
||||
type TFramedTransport struct {
|
||||
transport TTransport
|
||||
buf bytes.Buffer
|
||||
reader *bufio.Reader
|
||||
frameSize uint32 //Current remaining size of the frame. if ==0 read next frame header
|
||||
buffer [4]byte
|
||||
maxLength uint32
}

type tFramedTransportFactory struct {
	factory   TTransportFactory
	maxLength uint32
}

func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
	return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
}

func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
	return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
}

func (p *tFramedTransportFactory) GetTransport(base TTransport) TTransport {
	return NewTFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength)
}

func NewTFramedTransport(transport TTransport) *TFramedTransport {
	return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
}

func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
	return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
}

func (p *TFramedTransport) Open() error {
	return p.transport.Open()
}

func (p *TFramedTransport) IsOpen() bool {
	return p.transport.IsOpen()
}

func (p *TFramedTransport) Close() error {
	return p.transport.Close()
}

func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
	if p.frameSize == 0 {
		p.frameSize, err = p.readFrameHeader()
		if err != nil {
			return
		}
	}
	if p.frameSize < uint32(len(buf)) {
		frameSize := p.frameSize
		tmp := make([]byte, p.frameSize)
		l, err = p.Read(tmp)
		copy(buf, tmp)
		if err == nil {
			err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf)))
			return
		}
	}
	got, err := p.reader.Read(buf)
	p.frameSize = p.frameSize - uint32(got)
	//sanity check
	if p.frameSize < 0 {
		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size")
	}
	return got, NewTTransportExceptionFromError(err)
}

func (p *TFramedTransport) ReadByte() (c byte, err error) {
	if p.frameSize == 0 {
		p.frameSize, err = p.readFrameHeader()
		if err != nil {
			return
		}
	}
	if p.frameSize < 1 {
		return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1))
	}
	c, err = p.reader.ReadByte()
	if err == nil {
		p.frameSize--
	}
	return
}

func (p *TFramedTransport) Write(buf []byte) (int, error) {
	n, err := p.buf.Write(buf)
	return n, NewTTransportExceptionFromError(err)
}

func (p *TFramedTransport) WriteByte(c byte) error {
	return p.buf.WriteByte(c)
}

func (p *TFramedTransport) WriteString(s string) (n int, err error) {
	return p.buf.WriteString(s)
}

func (p *TFramedTransport) Flush() error {
	size := p.buf.Len()
	buf := p.buffer[:4]
	binary.BigEndian.PutUint32(buf, uint32(size))
	_, err := p.transport.Write(buf)
	if err != nil {
		return NewTTransportExceptionFromError(err)
	}
	if size > 0 {
		if n, err := p.buf.WriteTo(p.transport); err != nil {
			print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
			return NewTTransportExceptionFromError(err)
		}
	}
	err = p.transport.Flush()
	return NewTTransportExceptionFromError(err)
}

func (p *TFramedTransport) readFrameHeader() (uint32, error) {
	buf := p.buffer[:4]
	if _, err := io.ReadFull(p.reader, buf); err != nil {
		return 0, err
	}
	size := binary.BigEndian.Uint32(buf)
	if size < 0 || size > p.maxLength {
		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
	}
	return size, nil
}

func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
	return uint64(p.frameSize)
}
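Not part of the vendored file above: a minimal client-side sketch of how this framed transport is typically layered over another transport. The address, the TSocket constructor and the binary-protocol pairing are illustrative assumptions, not something this commit configures.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// Illustrative address; any TTransport can sit underneath the framing layer.
	sock, err := thrift.NewTSocket("127.0.0.1:9090")
	if err != nil {
		panic(err)
	}
	// Each Flush() writes a 4-byte big-endian length prefix followed by the buffered payload.
	trans := thrift.NewTFramedTransport(sock)
	if err := trans.Open(); err != nil {
		panic(err)
	}
	defer trans.Close()

	proto := thrift.NewTBinaryProtocolTransport(trans)
	// Write one framed message by hand; a generated client would normally do this.
	if err := proto.WriteMessageBegin("ping", thrift.CALL, 1); err != nil {
		panic(err)
	}
	if err := proto.WriteMessageEnd(); err != nil {
		panic(err)
	}
	if err := trans.Flush(); err != nil { // emits the length-prefixed frame
		panic(err)
	}
	fmt.Println("frame sent")
}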
@ -0,0 +1,212 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

import (
	"bytes"
	"io"
	"net/http"
	"net/url"
	"strconv"
)

type THttpClient struct {
	response           *http.Response
	url                *url.URL
	requestBuffer      *bytes.Buffer
	header             http.Header
	nsecConnectTimeout int64
	nsecReadTimeout    int64
}

type THttpClientTransportFactory struct {
	url    string
	isPost bool
}

func (p *THttpClientTransportFactory) GetTransport(trans TTransport) TTransport {
	if trans != nil {
		t, ok := trans.(*THttpClient)
		if ok && t.url != nil {
			if t.requestBuffer != nil {
				t2, _ := NewTHttpPostClient(t.url.String())
				return t2
			}
			t2, _ := NewTHttpClient(t.url.String())
			return t2
		}
	}
	if p.isPost {
		s, _ := NewTHttpPostClient(p.url)
		return s
	}
	s, _ := NewTHttpClient(p.url)
	return s
}

func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
	return &THttpClientTransportFactory{url: url, isPost: false}
}

func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
	return &THttpClientTransportFactory{url: url, isPost: true}
}

func NewTHttpClient(urlstr string) (TTransport, error) {
	parsedURL, err := url.Parse(urlstr)
	if err != nil {
		return nil, err
	}
	response, err := http.Get(urlstr)
	if err != nil {
		return nil, err
	}
	return &THttpClient{response: response, url: parsedURL}, nil
}

func NewTHttpPostClient(urlstr string) (TTransport, error) {
	parsedURL, err := url.Parse(urlstr)
	if err != nil {
		return nil, err
	}
	buf := make([]byte, 0, 1024)
	return &THttpClient{url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: http.Header{}}, nil
}

// Set the HTTP Header for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(THttpClient)
// httpTrans.SetHeader("User-Agent","Thrift Client 1.0")
func (p *THttpClient) SetHeader(key string, value string) {
	p.header.Add(key, value)
}

// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(THttpClient)
// hdrValue := httpTrans.GetHeader("User-Agent")
func (p *THttpClient) GetHeader(key string) string {
	return p.header.Get(key)
}

// Deletes the HTTP Header given a Header Key for this specific Thrift Transport
// It is important that you first assert the TTransport as a THttpClient type
// like so:
//
// httpTrans := trans.(THttpClient)
// httpTrans.DelHeader("User-Agent")
func (p *THttpClient) DelHeader(key string) {
	p.header.Del(key)
}

func (p *THttpClient) Open() error {
	// do nothing
	return nil
}

func (p *THttpClient) IsOpen() bool {
	return p.response != nil || p.requestBuffer != nil
}

func (p *THttpClient) closeResponse() error {
	var err error
	if p.response != nil && p.response.Body != nil {
		err = p.response.Body.Close()
	}

	p.response = nil
	return err
}

func (p *THttpClient) Close() error {
	if p.requestBuffer != nil {
		p.requestBuffer.Reset()
		p.requestBuffer = nil
	}
	return p.closeResponse()
}

func (p *THttpClient) Read(buf []byte) (int, error) {
	if p.response == nil {
		return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
	}
	n, err := p.response.Body.Read(buf)
	if n > 0 && (err == nil || err == io.EOF) {
		return n, nil
	}
	return n, NewTTransportExceptionFromError(err)
}

func (p *THttpClient) ReadByte() (c byte, err error) {
	return readByte(p.response.Body)
}

func (p *THttpClient) Write(buf []byte) (int, error) {
	n, err := p.requestBuffer.Write(buf)
	return n, err
}

func (p *THttpClient) WriteByte(c byte) error {
	return p.requestBuffer.WriteByte(c)
}

func (p *THttpClient) WriteString(s string) (n int, err error) {
	return p.requestBuffer.WriteString(s)
}

func (p *THttpClient) Flush() error {
	// Close any previous response body to avoid leaking connections.
	p.closeResponse()

	client := &http.Client{}
	req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
	if err != nil {
		return NewTTransportExceptionFromError(err)
	}
	p.header.Add("Content-Type", "application/x-thrift")
	req.Header = p.header
	response, err := client.Do(req)
	if err != nil {
		return NewTTransportExceptionFromError(err)
	}
	if response.StatusCode != http.StatusOK {
		// Close the response to avoid leaking file descriptors.
		response.Body.Close()
		// TODO(pomack) log bad response
		return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
	}
	p.response = response
	return nil
}

func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
	len := p.response.ContentLength
	if len >= 0 {
		return uint64(len)
	}

	const maxSize = ^uint64(0)
	return maxSize // the thruth is, we just don't know unless framed is used
}
214 vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go generated vendored Normal file
@ -0,0 +1,214 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
)
|
||||
|
||||
// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
|
||||
type StreamTransport struct {
|
||||
io.Reader
|
||||
io.Writer
|
||||
isReadWriter bool
|
||||
closed bool
|
||||
}
|
||||
|
||||
type StreamTransportFactory struct {
|
||||
Reader io.Reader
|
||||
Writer io.Writer
|
||||
isReadWriter bool
|
||||
}
|
||||
|
||||
func (p *StreamTransportFactory) GetTransport(trans TTransport) TTransport {
|
||||
if trans != nil {
|
||||
t, ok := trans.(*StreamTransport)
|
||||
if ok {
|
||||
if t.isReadWriter {
|
||||
return NewStreamTransportRW(t.Reader.(io.ReadWriter))
|
||||
}
|
||||
if t.Reader != nil && t.Writer != nil {
|
||||
return NewStreamTransport(t.Reader, t.Writer)
|
||||
}
|
||||
if t.Reader != nil && t.Writer == nil {
|
||||
return NewStreamTransportR(t.Reader)
|
||||
}
|
||||
if t.Reader == nil && t.Writer != nil {
|
||||
return NewStreamTransportW(t.Writer)
|
||||
}
|
||||
return &StreamTransport{}
|
||||
}
|
||||
}
|
||||
if p.isReadWriter {
|
||||
return NewStreamTransportRW(p.Reader.(io.ReadWriter))
|
||||
}
|
||||
if p.Reader != nil && p.Writer != nil {
|
||||
return NewStreamTransport(p.Reader, p.Writer)
|
||||
}
|
||||
if p.Reader != nil && p.Writer == nil {
|
||||
return NewStreamTransportR(p.Reader)
|
||||
}
|
||||
if p.Reader == nil && p.Writer != nil {
|
||||
return NewStreamTransportW(p.Writer)
|
||||
}
|
||||
return &StreamTransport{}
|
||||
}
|
||||
|
||||
func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
|
||||
return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
|
||||
}
|
||||
|
||||
func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
|
||||
return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
|
||||
}
|
||||
|
||||
func NewStreamTransportR(r io.Reader) *StreamTransport {
|
||||
return &StreamTransport{Reader: bufio.NewReader(r)}
|
||||
}
|
||||
|
||||
func NewStreamTransportW(w io.Writer) *StreamTransport {
|
||||
return &StreamTransport{Writer: bufio.NewWriter(w)}
|
||||
}
|
||||
|
||||
func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
|
||||
bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
|
||||
return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
|
||||
}
|
||||
|
||||
func (p *StreamTransport) IsOpen() bool {
|
||||
return !p.closed
|
||||
}
|
||||
|
||||
// implicitly opened on creation, can't be reopened once closed
|
||||
func (p *StreamTransport) Open() error {
|
||||
if !p.closed {
|
||||
return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
|
||||
} else {
|
||||
return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
|
||||
}
|
||||
}
|
||||
|
||||
// Closes both the input and output streams.
|
||||
func (p *StreamTransport) Close() error {
|
||||
if p.closed {
|
||||
return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
|
||||
}
|
||||
p.closed = true
|
||||
closedReader := false
|
||||
if p.Reader != nil {
|
||||
c, ok := p.Reader.(io.Closer)
|
||||
if ok {
|
||||
e := c.Close()
|
||||
closedReader = true
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
p.Reader = nil
|
||||
}
|
||||
if p.Writer != nil && (!closedReader || !p.isReadWriter) {
|
||||
c, ok := p.Writer.(io.Closer)
|
||||
if ok {
|
||||
e := c.Close()
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
p.Writer = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flushes the underlying output stream if not null.
|
||||
func (p *StreamTransport) Flush() error {
|
||||
if p.Writer == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
|
||||
}
|
||||
f, ok := p.Writer.(Flusher)
|
||||
if ok {
|
||||
err := f.Flush()
|
||||
if err != nil {
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *StreamTransport) Read(c []byte) (n int, err error) {
|
||||
n, err = p.Reader.Read(c)
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) ReadByte() (c byte, err error) {
|
||||
f, ok := p.Reader.(io.ByteReader)
|
||||
if ok {
|
||||
c, err = f.ReadByte()
|
||||
} else {
|
||||
c, err = readByte(p.Reader)
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) Write(c []byte) (n int, err error) {
|
||||
n, err = p.Writer.Write(c)
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) WriteByte(c byte) (err error) {
|
||||
f, ok := p.Writer.(io.ByteWriter)
|
||||
if ok {
|
||||
err = f.WriteByte(c)
|
||||
} else {
|
||||
err = writeByte(p.Writer, c)
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) WriteString(s string) (n int, err error) {
|
||||
f, ok := p.Writer.(stringWriter)
|
||||
if ok {
|
||||
n, err = f.WriteString(s)
|
||||
} else {
|
||||
n, err = p.Writer.Write([]byte(s))
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
|
||||
|
|
@ -0,0 +1,580 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
THRIFT_JSON_PROTOCOL_VERSION = 1
|
||||
)
|
||||
|
||||
// for references to _ParseContext see tsimplejson_protocol.go
|
||||
|
||||
// JSON protocol implementation for thrift.
|
||||
//
|
||||
// This protocol produces/consumes a simple output format
|
||||
// suitable for parsing by scripting languages. It should not be
|
||||
// confused with the full-featured TJSONProtocol.
|
||||
//
|
||||
type TJSONProtocol struct {
|
||||
*TSimpleJSONProtocol
|
||||
}
|
||||
|
||||
// Constructor
|
||||
func NewTJSONProtocol(t TTransport) *TJSONProtocol {
|
||||
v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
|
||||
v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
|
||||
v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
|
||||
return v
|
||||
}
|
||||
|
||||
// Factory
|
||||
type TJSONProtocolFactory struct{}
|
||||
|
||||
func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
|
||||
return NewTJSONProtocol(trans)
|
||||
}
|
||||
|
||||
func NewTJSONProtocolFactory() *TJSONProtocolFactory {
|
||||
return &TJSONProtocolFactory{}
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteString(name); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteByte(int8(typeId)); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI32(seqId); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteMessageEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteStructBegin(name string) error {
|
||||
if e := p.OutputObjectBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteStructEnd() error {
|
||||
return p.OutputObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
if e := p.WriteI16(id); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.OutputObjectBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(typeId)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldEnd() error {
|
||||
return p.OutputObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldStop() error { return nil }
|
||||
|
||||
func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(keyType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 = p.TypeIdToString(valueType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI64(int64(size)); e != nil {
|
||||
return e
|
||||
}
|
||||
return p.OutputObjectBegin()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteMapEnd() error {
|
||||
if e := p.OutputObjectEnd(); e != nil {
|
||||
return e
|
||||
}
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
return p.OutputElemListBegin(elemType, size)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteListEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
return p.OutputElemListBegin(elemType, size)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteSetEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteBool(b bool) error {
|
||||
if b {
|
||||
return p.WriteI32(1)
|
||||
}
|
||||
return p.WriteI32(0)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteByte(b int8) error {
|
||||
return p.WriteI32(int32(b))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI16(v int16) error {
|
||||
return p.WriteI32(int32(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI32(v int32) error {
|
||||
return p.OutputI64(int64(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI64(v int64) error {
|
||||
return p.OutputI64(int64(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteDouble(v float64) error {
|
||||
return p.OutputF64(v)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteString(v string) error {
|
||||
return p.OutputString(v)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteBinary(v []byte) error {
|
||||
// JSON library only takes in a string,
|
||||
// not an arbitrary byte array, to ensure bytes are transmitted
|
||||
// efficiently we must convert this into a valid JSON string
|
||||
// therefore we use base64 encoding to avoid excessive escaping/quoting
|
||||
if e := p.OutputPreValue(); e != nil {
|
||||
return e
|
||||
}
|
||||
if _, e := p.writer.Write(JSON_QUOTE_BYTES); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
writer := base64.NewEncoder(base64.StdEncoding, p.writer)
|
||||
if _, e := writer.Write(v); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if e := writer.Close(); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if _, e := p.writer.Write(JSON_QUOTE_BYTES); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
return p.OutputPostValue()
|
||||
}
|
||||
|
||||
// Reading methods.
|
||||
func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
|
||||
if isNull, err := p.ParseListBegin(); isNull || err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
version, err := p.ReadI32()
|
||||
if err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
if version != THRIFT_JSON_PROTOCOL_VERSION {
|
||||
e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
|
||||
return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
|
||||
}
|
||||
if name, err = p.ReadString(); err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
bTypeId, err := p.ReadByte()
|
||||
typeId = TMessageType(bTypeId)
|
||||
if err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
if seqId, err = p.ReadI32(); err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
return name, typeId, seqId, nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadMessageEnd() error {
|
||||
err := p.ParseListEnd()
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadStructBegin() (name string, err error) {
|
||||
_, err = p.ParseObjectStart()
|
||||
return "", err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadStructEnd() error {
|
||||
return p.ParseObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
|
||||
b, _ := p.reader.Peek(1)
|
||||
if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
|
||||
return "", STOP, -1, nil
|
||||
}
|
||||
fieldId, err := p.ReadI16()
|
||||
if err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
if _, err = p.ParseObjectStart(); err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
sType, err := p.ReadString()
|
||||
if err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
fType, err := p.StringToTypeId(sType)
|
||||
return "", fType, fieldId, err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadFieldEnd() error {
|
||||
return p.ParseObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
|
||||
if isNull, e := p.ParseListBegin(); isNull || e != nil {
|
||||
return VOID, VOID, 0, e
|
||||
}
|
||||
|
||||
// read keyType
|
||||
sKeyType, e := p.ReadString()
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
keyType, e = p.StringToTypeId(sKeyType)
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
|
||||
// read valueType
|
||||
sValueType, e := p.ReadString()
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
valueType, e = p.StringToTypeId(sValueType)
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
|
||||
// read size
|
||||
iSize, e := p.ReadI64()
|
||||
if e != nil {
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
size = int(iSize)
|
||||
|
||||
_, e = p.ParseObjectStart()
|
||||
return keyType, valueType, size, e
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadMapEnd() error {
|
||||
e := p.ParseObjectEnd()
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
return p.ParseListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
|
||||
return p.ParseElemListBegin()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadListEnd() error {
|
||||
return p.ParseListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
|
||||
return p.ParseElemListBegin()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadSetEnd() error {
|
||||
return p.ParseListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadBool() (bool, error) {
|
||||
value, err := p.ReadI32()
|
||||
return (value != 0), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadByte() (int8, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int8(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI16() (int16, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int16(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI32() (int32, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int32(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI64() (int64, error) {
|
||||
v, _, err := p.ParseI64()
|
||||
return v, err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadDouble() (float64, error) {
|
||||
v, _, err := p.ParseF64()
|
||||
return v, err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadString() (string, error) {
|
||||
var v string
|
||||
if err := p.ParsePreValue(); err != nil {
|
||||
return v, err
|
||||
}
|
||||
f, _ := p.reader.Peek(1)
|
||||
if len(f) > 0 && f[0] == JSON_QUOTE {
|
||||
p.reader.ReadByte()
|
||||
value, err := p.ParseStringBody()
|
||||
v = value
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
} else if len(f) >= 0 && f[0] == JSON_NULL[0] {
|
||||
b := make([]byte, len(JSON_NULL))
|
||||
_, err := p.reader.Read(b)
|
||||
if err != nil {
|
||||
return v, NewTProtocolException(err)
|
||||
}
|
||||
if string(b) != string(JSON_NULL) {
|
||||
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
|
||||
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
} else {
|
||||
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
|
||||
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
return v, p.ParsePostValue()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
|
||||
var v []byte
|
||||
if err := p.ParsePreValue(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f, _ := p.reader.Peek(1)
|
||||
if len(f) > 0 && f[0] == JSON_QUOTE {
|
||||
p.reader.ReadByte()
|
||||
value, err := p.ParseBase64EncodedBody()
|
||||
v = value
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
} else if len(f) >= 0 && f[0] == JSON_NULL[0] {
|
||||
b := make([]byte, len(JSON_NULL))
|
||||
_, err := p.reader.Read(b)
|
||||
if err != nil {
|
||||
return v, NewTProtocolException(err)
|
||||
}
|
||||
if string(b) != string(JSON_NULL) {
|
||||
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
|
||||
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
} else {
|
||||
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
|
||||
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
|
||||
return v, p.ParsePostValue()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) Flush() (err error) {
|
||||
err = p.writer.Flush()
|
||||
if err == nil {
|
||||
err = p.trans.Flush()
|
||||
}
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) Skip(fieldType TType) (err error) {
|
||||
return SkipDefaultDepth(p, fieldType)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) Transport() TTransport {
|
||||
return p.trans
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(elemType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI64(int64(size)); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
|
||||
if isNull, e := p.ParseListBegin(); isNull || e != nil {
|
||||
return VOID, 0, e
|
||||
}
|
||||
sElemType, err := p.ReadString()
|
||||
if err != nil {
|
||||
return VOID, size, err
|
||||
}
|
||||
elemType, err = p.StringToTypeId(sElemType)
|
||||
if err != nil {
|
||||
return elemType, size, err
|
||||
}
|
||||
nSize, err2 := p.ReadI64()
|
||||
size = int(nSize)
|
||||
return elemType, size, err2
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
|
||||
if isNull, e := p.ParseListBegin(); isNull || e != nil {
|
||||
return VOID, 0, e
|
||||
}
|
||||
sElemType, err := p.ReadString()
|
||||
if err != nil {
|
||||
return VOID, size, err
|
||||
}
|
||||
elemType, err = p.StringToTypeId(sElemType)
|
||||
if err != nil {
|
||||
return elemType, size, err
|
||||
}
|
||||
nSize, err2 := p.ReadI64()
|
||||
size = int(nSize)
|
||||
return elemType, size, err2
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(elemType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.OutputString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.OutputI64(int64(size)); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
|
||||
switch byte(fieldType) {
|
||||
case BOOL:
|
||||
return "tf", nil
|
||||
case BYTE:
|
||||
return "i8", nil
|
||||
case I16:
|
||||
return "i16", nil
|
||||
case I32:
|
||||
return "i32", nil
|
||||
case I64:
|
||||
return "i64", nil
|
||||
case DOUBLE:
|
||||
return "dbl", nil
|
||||
case STRING:
|
||||
return "str", nil
|
||||
case STRUCT:
|
||||
return "rec", nil
|
||||
case MAP:
|
||||
return "map", nil
|
||||
case SET:
|
||||
return "set", nil
|
||||
case LIST:
|
||||
return "lst", nil
|
||||
}
|
||||
|
||||
e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
|
||||
return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
|
||||
switch fieldType {
|
||||
case "tf":
|
||||
return TType(BOOL), nil
|
||||
case "i8":
|
||||
return TType(BYTE), nil
|
||||
case "i16":
|
||||
return TType(I16), nil
|
||||
case "i32":
|
||||
return TType(I32), nil
|
||||
case "i64":
|
||||
return TType(I64), nil
|
||||
case "dbl":
|
||||
return TType(DOUBLE), nil
|
||||
case "str":
|
||||
return TType(STRING), nil
|
||||
case "rec":
|
||||
return TType(STRUCT), nil
|
||||
case "map":
|
||||
return TType(MAP), nil
|
||||
case "set":
|
||||
return TType(SET), nil
|
||||
case "lst":
|
||||
return TType(LIST), nil
|
||||
}
|
||||
|
||||
e := fmt.Errorf("Unknown type identifier: %s", fieldType)
|
||||
return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
}
|
|
@ -0,0 +1,79 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

import (
	"bytes"
)

// Memory buffer-based implementation of the TTransport interface.
type TMemoryBuffer struct {
	*bytes.Buffer
	size int
}

type TMemoryBufferTransportFactory struct {
	size int
}

func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) TTransport {
	if trans != nil {
		t, ok := trans.(*TMemoryBuffer)
		if ok && t.size > 0 {
			return NewTMemoryBufferLen(t.size)
		}
	}
	return NewTMemoryBufferLen(p.size)
}

func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
	return &TMemoryBufferTransportFactory{size: size}
}

func NewTMemoryBuffer() *TMemoryBuffer {
	return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
}

func NewTMemoryBufferLen(size int) *TMemoryBuffer {
	buf := make([]byte, 0, size)
	return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
}

func (p *TMemoryBuffer) IsOpen() bool {
	return true
}

func (p *TMemoryBuffer) Open() error {
	return nil
}

func (p *TMemoryBuffer) Close() error {
	p.Buffer.Reset()
	return nil
}

// Flushing a memory buffer is a no-op
func (p *TMemoryBuffer) Flush() error {
	return nil
}

func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
	return uint64(p.Buffer.Len())
}
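Not from the vendored sources: a minimal sketch of using TMemoryBuffer as an in-process transport, paired here with the binary protocol as an illustrative choice; nothing in this commit wires it up this way.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// The memory buffer is both the write target and, afterwards, the read source.
	buf := thrift.NewTMemoryBuffer()

	out := thrift.NewTBinaryProtocolTransport(buf)
	if err := out.WriteString("hello"); err != nil {
		panic(err)
	}
	if err := out.Flush(); err != nil { // a no-op for a memory buffer, kept for symmetry
		panic(err)
	}

	in := thrift.NewTBinaryProtocolTransport(buf)
	s, err := in.ReadString()
	if err != nil {
		panic(err)
	}
	fmt.Println(s, "remaining:", buf.RemainingBytes())
}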
@ -0,0 +1,31 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

// Message type constants in the Thrift protocol.
type TMessageType int32

const (
	INVALID_TMESSAGE_TYPE TMessageType = 0
	CALL                  TMessageType = 1
	REPLY                 TMessageType = 2
	EXCEPTION             TMessageType = 3
	ONEWAY                TMessageType = 4
)
169 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go generated vendored Normal file
@ -0,0 +1,169 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
TMultiplexedProtocol is a protocol-independent concrete decorator
|
||||
that allows a Thrift client to communicate with a multiplexing Thrift server,
|
||||
by prepending the service name to the function name during function calls.
|
||||
|
||||
NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request
|
||||
from a multiplexing client.
|
||||
|
||||
This example uses a single socket transport to invoke two services:
|
||||
|
||||
socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
|
||||
transport := thrift.NewTFramedTransport(socket)
|
||||
protocol := thrift.NewTBinaryProtocolTransport(transport)
|
||||
|
||||
mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
|
||||
service := Calculator.NewCalculatorClient(mp)
|
||||
|
||||
mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
|
||||
service2 := WeatherReport.NewWeatherReportClient(mp2)
|
||||
|
||||
err := transport.Open()
|
||||
if err != nil {
|
||||
t.Fatal("Unable to open client socket", err)
|
||||
}
|
||||
|
||||
fmt.Println(service.Add(2,2))
|
||||
fmt.Println(service2.GetTemperature())
|
||||
*/
|
||||
|
||||
type TMultiplexedProtocol struct {
|
||||
TProtocol
|
||||
serviceName string
|
||||
}
|
||||
|
||||
const MULTIPLEXED_SEPARATOR = ":"
|
||||
|
||||
func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
|
||||
return &TMultiplexedProtocol{
|
||||
TProtocol: protocol,
|
||||
serviceName: serviceName,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
|
||||
if typeId == CALL || typeId == ONEWAY {
|
||||
return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
|
||||
} else {
|
||||
return t.TProtocol.WriteMessageBegin(name, typeId, seqid)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
TMultiplexedProcessor is a TProcessor allowing
|
||||
a single TServer to provide multiple services.
|
||||
|
||||
To do so, you instantiate the processor and then register additional
|
||||
processors with it, as shown in the following example:
|
||||
|
||||
var processor = thrift.NewTMultiplexedProcessor()
|
||||
|
||||
firstProcessor :=
|
||||
processor.RegisterProcessor("FirstService", firstProcessor)
|
||||
|
||||
processor.registerProcessor(
|
||||
"Calculator",
|
||||
Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
|
||||
)
|
||||
|
||||
processor.registerProcessor(
|
||||
"WeatherReport",
|
||||
WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
|
||||
)
|
||||
|
||||
serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create server socket", err)
|
||||
}
|
||||
server := thrift.NewTSimpleServer2(processor, serverTransport)
|
||||
server.Serve();
|
||||
*/
|
||||
|
||||
type TMultiplexedProcessor struct {
|
||||
serviceProcessorMap map[string]TProcessor
|
||||
DefaultProcessor TProcessor
|
||||
}
|
||||
|
||||
func NewTMultiplexedProcessor() *TMultiplexedProcessor {
|
||||
return &TMultiplexedProcessor{
|
||||
serviceProcessorMap: make(map[string]TProcessor),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
|
||||
t.DefaultProcessor = processor
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
|
||||
if t.serviceProcessorMap == nil {
|
||||
t.serviceProcessorMap = make(map[string]TProcessor)
|
||||
}
|
||||
t.serviceProcessorMap[name] = processor
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProcessor) Process(in, out TProtocol) (bool, TException) {
|
||||
name, typeId, seqid, err := in.ReadMessageBegin()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if typeId != CALL && typeId != ONEWAY {
|
||||
return false, fmt.Errorf("Unexpected message type %v", typeId)
|
||||
}
|
||||
//extract the service name
|
||||
v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
|
||||
if len(v) != 2 {
|
||||
if t.DefaultProcessor != nil {
|
||||
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
|
||||
return t.DefaultProcessor.Process(smb, out)
|
||||
}
|
||||
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
|
||||
}
|
||||
actualProcessor, ok := t.serviceProcessorMap[v[0]]
|
||||
if !ok {
|
||||
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
|
||||
}
|
||||
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
|
||||
return actualProcessor.Process(smb, out)
|
||||
}
|
||||
|
||||
//Protocol that use stored message for ReadMessageBegin
|
||||
type storedMessageProtocol struct {
|
||||
TProtocol
|
||||
name string
|
||||
typeId TMessageType
|
||||
seqid int32
|
||||
}
|
||||
|
||||
func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
|
||||
return &storedMessageProtocol{protocol, name, typeId, seqid}
|
||||
}
|
||||
|
||||
func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
|
||||
return s.name, s.typeId, s.seqid, nil
|
||||
}
|
|
@ -0,0 +1,164 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type Numeric interface {
|
||||
Int64() int64
|
||||
Int32() int32
|
||||
Int16() int16
|
||||
Byte() byte
|
||||
Int() int
|
||||
Float64() float64
|
||||
Float32() float32
|
||||
String() string
|
||||
isNull() bool
|
||||
}
|
||||
|
||||
type numeric struct {
|
||||
iValue int64
|
||||
dValue float64
|
||||
sValue string
|
||||
isNil bool
|
||||
}
|
||||
|
||||
var (
|
||||
INFINITY Numeric
|
||||
NEGATIVE_INFINITY Numeric
|
||||
NAN Numeric
|
||||
ZERO Numeric
|
||||
NUMERIC_NULL Numeric
|
||||
)
|
||||
|
||||
func NewNumericFromDouble(dValue float64) Numeric {
|
||||
if math.IsInf(dValue, 1) {
|
||||
return INFINITY
|
||||
}
|
||||
if math.IsInf(dValue, -1) {
|
||||
return NEGATIVE_INFINITY
|
||||
}
|
||||
if math.IsNaN(dValue) {
|
||||
return NAN
|
||||
}
|
||||
iValue := int64(dValue)
|
||||
sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
|
||||
isNil := false
|
||||
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
|
||||
}
|
||||
|
||||
func NewNumericFromI64(iValue int64) Numeric {
|
||||
dValue := float64(iValue)
|
||||
sValue := string(iValue)
|
||||
isNil := false
|
||||
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
|
||||
}
|
||||
|
||||
func NewNumericFromI32(iValue int32) Numeric {
|
||||
dValue := float64(iValue)
|
||||
sValue := string(iValue)
|
||||
isNil := false
|
||||
return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
|
||||
}
|
||||
|
||||
func NewNumericFromString(sValue string) Numeric {
|
||||
if sValue == INFINITY.String() {
|
||||
return INFINITY
|
||||
}
|
||||
if sValue == NEGATIVE_INFINITY.String() {
|
||||
return NEGATIVE_INFINITY
|
||||
}
|
||||
if sValue == NAN.String() {
|
||||
return NAN
|
||||
}
|
||||
iValue, _ := strconv.ParseInt(sValue, 10, 64)
|
||||
dValue, _ := strconv.ParseFloat(sValue, 64)
|
||||
isNil := len(sValue) == 0
|
||||
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
|
||||
}
|
||||
|
||||
func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
|
||||
if isNull {
|
||||
return NewNullNumeric()
|
||||
}
|
||||
if sValue == JSON_INFINITY {
|
||||
return INFINITY
|
||||
}
|
||||
if sValue == JSON_NEGATIVE_INFINITY {
|
||||
return NEGATIVE_INFINITY
|
||||
}
|
||||
if sValue == JSON_NAN {
|
||||
return NAN
|
||||
}
|
||||
iValue, _ := strconv.ParseInt(sValue, 10, 64)
|
||||
dValue, _ := strconv.ParseFloat(sValue, 64)
|
||||
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
|
||||
}
|
||||
|
||||
func NewNullNumeric() Numeric {
|
||||
return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
|
||||
}
|
||||
|
||||
func (p *numeric) Int64() int64 {
|
||||
return p.iValue
|
||||
}
|
||||
|
||||
func (p *numeric) Int32() int32 {
|
||||
return int32(p.iValue)
|
||||
}
|
||||
|
||||
func (p *numeric) Int16() int16 {
|
||||
return int16(p.iValue)
|
||||
}
|
||||
|
||||
func (p *numeric) Byte() byte {
|
||||
return byte(p.iValue)
|
||||
}
|
||||
|
||||
func (p *numeric) Int() int {
|
||||
return int(p.iValue)
|
||||
}
|
||||
|
||||
func (p *numeric) Float64() float64 {
|
||||
return p.dValue
|
||||
}
|
||||
|
||||
func (p *numeric) Float32() float32 {
|
||||
return float32(p.dValue)
|
||||
}
|
||||
|
||||
func (p *numeric) String() string {
|
||||
return p.sValue
|
||||
}
|
||||
|
||||
func (p *numeric) isNull() bool {
|
||||
return p.isNil
|
||||
}
|
||||
|
||||
func init() {
|
||||
INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
|
||||
NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
|
||||
NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
|
||||
ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
|
||||
NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
|
||||
}
|
|
@ -0,0 +1,50 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

///////////////////////////////////////////////////////////////////////////////
// This file is home to helpers that convert from various base types to
// respective pointer types. This is necessary because Go does not permit
// references to constants, nor can a pointer type to base type be allocated
// and initialized in a single expression.
//
// E.g., this is not allowed:
//
//    var ip *int = &5
//
// But this *is* allowed:
//
//    func IntPtr(i int) *int { return &i }
//    var ip *int = IntPtr(5)
//
// Since pointers to base types are commonplace as [optional] fields in
// exported thrift structs, we factor such helpers here.
///////////////////////////////////////////////////////////////////////////////

func Float32Ptr(v float32) *float32 { return &v }
func Float64Ptr(v float64) *float64 { return &v }
func IntPtr(v int) *int             { return &v }
func Int32Ptr(v int32) *int32       { return &v }
func Int64Ptr(v int64) *int64       { return &v }
func StringPtr(v string) *string    { return &v }
func Uint32Ptr(v uint32) *uint32    { return &v }
func Uint64Ptr(v uint64) *uint64    { return &v }
func BoolPtr(v bool) *bool          { return &v }
func ByteSlicePtr(v []byte) *[]byte { return &v }
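Not part of the vendored file: a brief sketch of why generated code leans on these pointer helpers; the UserProfile struct is hypothetical and only stands in for a generated type with optional fields.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

// Hypothetical shape of a generated struct: optional Thrift fields become
// pointers so that "unset" stays distinguishable from the zero value.
type UserProfile struct {
	Name *string
	Age  *int32
}

func main() {
	// Expressions like &"alice" or &int32(30) are not legal Go, hence the helpers.
	p := UserProfile{
		Name: thrift.StringPtr("alice"),
		Age:  thrift.Int32Ptr(30),
	}
	fmt.Println(*p.Name, *p.Age)
}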
@ -0,0 +1,30 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

// A processor is a generic object which operates upon an input stream and
// writes to some output stream.
type TProcessor interface {
	Process(in, out TProtocol) (bool, TException)
}

type TProcessorFunction interface {
	Process(seqId int32, in, out TProtocol) (bool, TException)
}
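Not from the vendored sources: a small decorator sketch showing how the TProcessor interface composes; the logging wrapper and the package name are hypothetical, and the wrapped processor would normally be generated code.

package example

import (
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

// loggingProcessor wraps any TProcessor and logs failures; it satisfies
// TProcessor itself, so it can be handed to a server in place of the delegate.
type loggingProcessor struct {
	delegate thrift.TProcessor
}

func (lp *loggingProcessor) Process(in, out thrift.TProtocol) (bool, thrift.TException) {
	ok, err := lp.delegate.Process(in, out) // delegate reads, dispatches and writes the reply
	if err != nil {
		log.Println("thrift processor error:", err)
	}
	return ok, err
}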
58 vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go generated vendored Normal file
@ -0,0 +1,58 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

// The default processor factory just returns a singleton
// instance.
type TProcessorFactory interface {
	GetProcessor(trans TTransport) TProcessor
}

type tProcessorFactory struct {
	processor TProcessor
}

func NewTProcessorFactory(p TProcessor) TProcessorFactory {
	return &tProcessorFactory{processor: p}
}

func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
	return p.processor
}

/**
 * The default processor factory just returns a singleton
 * instance.
 */
type TProcessorFunctionFactory interface {
	GetProcessorFunction(trans TTransport) TProcessorFunction
}

type tProcessorFunctionFactory struct {
	processor TProcessorFunction
}

func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
	return &tProcessorFunctionFactory{processor: p}
}

func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
	return p.processor
}
|
@ -0,0 +1,175 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

import (
	"errors"
)

const (
	VERSION_MASK = 0xffff0000
	VERSION_1    = 0x80010000
)

type TProtocol interface {
	WriteMessageBegin(name string, typeId TMessageType, seqid int32) error
	WriteMessageEnd() error
	WriteStructBegin(name string) error
	WriteStructEnd() error
	WriteFieldBegin(name string, typeId TType, id int16) error
	WriteFieldEnd() error
	WriteFieldStop() error
	WriteMapBegin(keyType TType, valueType TType, size int) error
	WriteMapEnd() error
	WriteListBegin(elemType TType, size int) error
	WriteListEnd() error
	WriteSetBegin(elemType TType, size int) error
	WriteSetEnd() error
	WriteBool(value bool) error
	WriteByte(value int8) error
	WriteI16(value int16) error
	WriteI32(value int32) error
	WriteI64(value int64) error
	WriteDouble(value float64) error
	WriteString(value string) error
	WriteBinary(value []byte) error

	ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error)
	ReadMessageEnd() error
	ReadStructBegin() (name string, err error)
	ReadStructEnd() error
	ReadFieldBegin() (name string, typeId TType, id int16, err error)
	ReadFieldEnd() error
	ReadMapBegin() (keyType TType, valueType TType, size int, err error)
	ReadMapEnd() error
	ReadListBegin() (elemType TType, size int, err error)
	ReadListEnd() error
	ReadSetBegin() (elemType TType, size int, err error)
	ReadSetEnd() error
	ReadBool() (value bool, err error)
	ReadByte() (value int8, err error)
	ReadI16() (value int16, err error)
	ReadI32() (value int32, err error)
	ReadI64() (value int64, err error)
	ReadDouble() (value float64, err error)
	ReadString() (value string, err error)
	ReadBinary() (value []byte, err error)

	Skip(fieldType TType) (err error)
	Flush() (err error)

	Transport() TTransport
}

// The maximum recursive depth the skip() function will traverse
const DEFAULT_RECURSION_DEPTH = 64

// Skips over the next data element from the provided input TProtocol object.
func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
	return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
}

// Skips over the next data element from the provided input TProtocol object.
func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {

	if maxDepth <= 0 {
		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
	}

	switch fieldType {
	case STOP:
		return
	case BOOL:
		_, err = self.ReadBool()
		return
	case BYTE:
		_, err = self.ReadByte()
		return
	case I16:
		_, err = self.ReadI16()
		return
	case I32:
		_, err = self.ReadI32()
		return
	case I64:
		_, err = self.ReadI64()
		return
	case DOUBLE:
		_, err = self.ReadDouble()
		return
	case STRING:
		_, err = self.ReadString()
		return
	case STRUCT:
		if _, err = self.ReadStructBegin(); err != nil {
			return err
		}
		for {
			_, typeId, _, _ := self.ReadFieldBegin()
			if typeId == STOP {
				break
			}
			err := Skip(self, typeId, maxDepth-1)
			if err != nil {
				return err
			}
			self.ReadFieldEnd()
		}
		return self.ReadStructEnd()
	case MAP:
		keyType, valueType, size, err := self.ReadMapBegin()
		if err != nil {
			return err
		}
		for i := 0; i < size; i++ {
			err := Skip(self, keyType, maxDepth-1)
			if err != nil {
				return err
			}
			self.Skip(valueType)
		}
		return self.ReadMapEnd()
	case SET:
		elemType, size, err := self.ReadSetBegin()
		if err != nil {
			return err
		}
		for i := 0; i < size; i++ {
			err := Skip(self, elemType, maxDepth-1)
			if err != nil {
				return err
			}
		}
		return self.ReadSetEnd()
	case LIST:
		elemType, size, err := self.ReadListBegin()
		if err != nil {
			return err
		}
		for i := 0; i < size; i++ {
			err := Skip(self, elemType, maxDepth-1)
			if err != nil {
				return err
			}
		}
		return self.ReadListEnd()
	}
	return nil
}
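As a hedged illustration of how the Skip helpers above are typically used, the sketch below drains every remaining field of a struct whose layout is unknown. prot stands for any concrete TProtocol implementation, and drainUnknownStruct is a hypothetical name, not part of the vendored file; only identifiers declared in this commit are used.

// Hypothetical sketch: skip all fields of a struct of unknown layout.
func drainUnknownStruct(prot TProtocol) error {
	if _, err := prot.ReadStructBegin(); err != nil {
		return err
	}
	for {
		_, typeId, _, err := prot.ReadFieldBegin()
		if err != nil {
			return err
		}
		if typeId == STOP {
			break
		}
		// Skip whatever the field contains, up to DEFAULT_RECURSION_DEPTH levels deep.
		if err := SkipDefaultDepth(prot, typeId); err != nil {
			return err
		}
		if err := prot.ReadFieldEnd(); err != nil {
			return err
		}
	}
	return prot.ReadStructEnd()
}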
78 vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go generated vendored Normal file
@@ -0,0 +1,78 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package thrift

import (
	"encoding/base64"
)

// Thrift Protocol exception
type TProtocolException interface {
	TException
	TypeId() int
}

const (
	UNKNOWN_PROTOCOL_EXCEPTION = 0
	INVALID_DATA               = 1
	NEGATIVE_SIZE              = 2
	SIZE_LIMIT                 = 3
	BAD_VERSION                = 4
	NOT_IMPLEMENTED            = 5
	DEPTH_LIMIT                = 6
)

type tProtocolException struct {
	typeId  int
	message string
}

func (p *tProtocolException) TypeId() int {
	return p.typeId
}

func (p *tProtocolException) String() string {
	return p.message
}

func (p *tProtocolException) Error() string {
	return p.message
}

func NewTProtocolException(err error) TProtocolException {
	if err == nil {
		return nil
	}
	if e, ok := err.(TProtocolException); ok {
		return e
	}
	if _, ok := err.(base64.CorruptInputError); ok {
		return &tProtocolException{INVALID_DATA, err.Error()}
	}
	return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()}
}

func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
	if err == nil {
		return nil
	}
	return &tProtocolException{errType, err.Error()}
}
@@ -0,0 +1,25 @@
/* ... Apache License 2.0 header, identical to the one in the files above ... */

package thrift

// Factory interface for constructing protocol instances.
type TProtocolFactory interface {
	GetProtocol(trans TTransport) TProtocol
}
@@ -0,0 +1,69 @@
/* ... Apache License 2.0 header, identical to the one in the files above ... */

package thrift

import "io"

type RichTransport struct {
	TTransport
}

// Wraps Transport to provide TRichTransport interface
func NewTRichTransport(trans TTransport) *RichTransport {
	return &RichTransport{trans}
}

func (r *RichTransport) ReadByte() (c byte, err error) {
	return readByte(r.TTransport)
}

func (r *RichTransport) WriteByte(c byte) error {
	return writeByte(r.TTransport, c)
}

func (r *RichTransport) WriteString(s string) (n int, err error) {
	return r.Write([]byte(s))
}

func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
	return r.TTransport.RemainingBytes()
}

func readByte(r io.Reader) (c byte, err error) {
	v := [1]byte{0}
	n, err := r.Read(v[0:1])
	if n > 0 && (err == nil || err == io.EOF) {
		return v[0], nil
	}
	if n > 0 && err != nil {
		return v[0], err
	}
	if err != nil {
		return 0, err
	}
	return v[0], nil
}

func writeByte(w io.Writer, c byte) error {
	v := [1]byte{c}
	_, err := w.Write(v[0:1])
	return err
}
@ -0,0 +1,75 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
type TSerializer struct {
|
||||
Transport *TMemoryBuffer
|
||||
Protocol TProtocol
|
||||
}
|
||||
|
||||
type TStruct interface {
|
||||
Write(p TProtocol) error
|
||||
Read(p TProtocol) error
|
||||
}
|
||||
|
||||
func NewTSerializer() *TSerializer {
|
||||
transport := NewTMemoryBufferLen(1024)
|
||||
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
|
||||
|
||||
return &TSerializer{
|
||||
transport,
|
||||
protocol}
|
||||
}
|
||||
|
||||
func (t *TSerializer) WriteString(msg TStruct) (s string, err error) {
|
||||
t.Transport.Reset()
|
||||
|
||||
if err = msg.Write(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = t.Protocol.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
if err = t.Transport.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return t.Transport.String(), nil
|
||||
}
|
||||
|
||||
func (t *TSerializer) Write(msg TStruct) (b []byte, err error) {
|
||||
t.Transport.Reset()
|
||||
|
||||
if err = msg.Write(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = t.Protocol.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = t.Transport.Flush(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
b = append(b, t.Transport.Bytes()...)
|
||||
return
|
||||
}
|
|
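A minimal usage sketch for the TSerializer declared in serializer.go above, assuming msg is any generated type satisfying TStruct; encodeToBytes is a hypothetical helper, not part of the vendored code.

// Hypothetical sketch: serialize a TStruct to a byte slice.
func encodeToBytes(msg TStruct) ([]byte, error) {
	// NewTSerializer wires a TMemoryBuffer to the default binary protocol.
	s := NewTSerializer()
	// Write resets the buffer, serializes msg, flushes, and returns the buffered bytes.
	return s.Write(msg)
}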
@@ -0,0 +1,35 @@
/* ... Apache License 2.0 header, identical to the one in the files above ... */

package thrift

type TServer interface {
	ProcessorFactory() TProcessorFactory
	ServerTransport() TServerTransport
	InputTransportFactory() TTransportFactory
	OutputTransportFactory() TTransportFactory
	InputProtocolFactory() TProtocolFactory
	OutputProtocolFactory() TProtocolFactory

	// Starts the server
	Serve() error
	// Stops the server. This is optional on a per-implementation basis. Not
	// all servers are required to be cleanly stoppable.
	Stop() error
}
@ -0,0 +1,121 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TServerSocket struct {
|
||||
listener net.Listener
|
||||
addr net.Addr
|
||||
clientTimeout time.Duration
|
||||
|
||||
// Protects the interrupted value to make it thread safe.
|
||||
mu sync.RWMutex
|
||||
interrupted bool
|
||||
}
|
||||
|
||||
func NewTServerSocket(listenAddr string) (*TServerSocket, error) {
|
||||
return NewTServerSocketTimeout(listenAddr, 0)
|
||||
}
|
||||
|
||||
func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) {
|
||||
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Listen() error {
|
||||
if p.IsListening() {
|
||||
return nil
|
||||
}
|
||||
l, err := net.Listen(p.addr.Network(), p.addr.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.listener = l
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Accept() (TTransport, error) {
|
||||
p.mu.RLock()
|
||||
interrupted := p.interrupted
|
||||
p.mu.RUnlock()
|
||||
|
||||
if interrupted {
|
||||
return nil, errTransportInterrupted
|
||||
}
|
||||
if p.listener == nil {
|
||||
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
|
||||
}
|
||||
conn, err := p.listener.Accept()
|
||||
if err != nil {
|
||||
return nil, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil
|
||||
}
|
||||
|
||||
// Checks whether the socket is listening.
|
||||
func (p *TServerSocket) IsListening() bool {
|
||||
return p.listener != nil
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TServerSocket) Open() error {
|
||||
if p.IsListening() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
|
||||
}
|
||||
if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil {
|
||||
return err
|
||||
} else {
|
||||
p.listener = l
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Addr() net.Addr {
|
||||
if p.listener != nil {
|
||||
return p.listener.Addr()
|
||||
}
|
||||
return p.addr
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Close() error {
|
||||
defer func() {
|
||||
p.listener = nil
|
||||
}()
|
||||
if p.IsListening() {
|
||||
return p.listener.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TServerSocket) Interrupt() error {
|
||||
p.mu.Lock()
|
||||
p.interrupted = true
|
||||
p.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,34 @@
/* ... Apache License 2.0 header, identical to the one in the files above ... */

package thrift

// Server transport. Object which provides client transports.
type TServerTransport interface {
	Listen() error
	Accept() (TTransport, error)
	Close() error

	// Optional method implementation. This signals to the server transport
	// that it should break out of any accept() or listen() that it is currently
	// blocked on. This method, if implemented, MUST be thread safe, as it may
	// be called from a different thread context than the other TServerTransport
	// methods.
	Interrupt() error
}
1320 vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go generated vendored Normal file
File diff suppressed because it is too large.
@ -0,0 +1,187 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"log"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
// Simple, non-concurrent server for testing.
|
||||
type TSimpleServer struct {
|
||||
quit chan struct{}
|
||||
|
||||
processorFactory TProcessorFactory
|
||||
serverTransport TServerTransport
|
||||
inputTransportFactory TTransportFactory
|
||||
outputTransportFactory TTransportFactory
|
||||
inputProtocolFactory TProtocolFactory
|
||||
outputProtocolFactory TProtocolFactory
|
||||
}
|
||||
|
||||
func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
|
||||
return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
|
||||
}
|
||||
|
||||
func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
|
||||
return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
|
||||
serverTransport,
|
||||
transportFactory,
|
||||
protocolFactory,
|
||||
)
|
||||
}
|
||||
|
||||
func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
|
||||
return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
|
||||
serverTransport,
|
||||
inputTransportFactory,
|
||||
outputTransportFactory,
|
||||
inputProtocolFactory,
|
||||
outputProtocolFactory,
|
||||
)
|
||||
}
|
||||
|
||||
func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
|
||||
return NewTSimpleServerFactory6(processorFactory,
|
||||
serverTransport,
|
||||
NewTTransportFactory(),
|
||||
NewTTransportFactory(),
|
||||
NewTBinaryProtocolFactoryDefault(),
|
||||
NewTBinaryProtocolFactoryDefault(),
|
||||
)
|
||||
}
|
||||
|
||||
func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
|
||||
return NewTSimpleServerFactory6(processorFactory,
|
||||
serverTransport,
|
||||
transportFactory,
|
||||
transportFactory,
|
||||
protocolFactory,
|
||||
protocolFactory,
|
||||
)
|
||||
}
|
||||
|
||||
func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
|
||||
return &TSimpleServer{
|
||||
processorFactory: processorFactory,
|
||||
serverTransport: serverTransport,
|
||||
inputTransportFactory: inputTransportFactory,
|
||||
outputTransportFactory: outputTransportFactory,
|
||||
inputProtocolFactory: inputProtocolFactory,
|
||||
outputProtocolFactory: outputProtocolFactory,
|
||||
quit: make(chan struct{}, 1),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
|
||||
return p.processorFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) ServerTransport() TServerTransport {
|
||||
return p.serverTransport
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
|
||||
return p.inputTransportFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
|
||||
return p.outputTransportFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
|
||||
return p.inputProtocolFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
|
||||
return p.outputProtocolFactory
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) Listen() error {
|
||||
return p.serverTransport.Listen()
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) AcceptLoop() error {
|
||||
for {
|
||||
client, err := p.serverTransport.Accept()
|
||||
if err != nil {
|
||||
select {
|
||||
case <-p.quit:
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
return err
|
||||
}
|
||||
if client != nil {
|
||||
go func() {
|
||||
if err := p.processRequests(client); err != nil {
|
||||
log.Println("error processing request:", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) Serve() error {
|
||||
err := p.Listen()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.AcceptLoop()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) Stop() error {
|
||||
p.quit <- struct{}{}
|
||||
p.serverTransport.Interrupt()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSimpleServer) processRequests(client TTransport) error {
|
||||
processor := p.processorFactory.GetProcessor(client)
|
||||
inputTransport := p.inputTransportFactory.GetTransport(client)
|
||||
outputTransport := p.outputTransportFactory.GetTransport(client)
|
||||
inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
|
||||
outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
log.Printf("panic in processor: %s: %s", e, debug.Stack())
|
||||
}
|
||||
}()
|
||||
if inputTransport != nil {
|
||||
defer inputTransport.Close()
|
||||
}
|
||||
if outputTransport != nil {
|
||||
defer outputTransport.Close()
|
||||
}
|
||||
for {
|
||||
ok, err := processor.Process(inputProtocol, outputProtocol)
|
||||
if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
log.Printf("error processing request: %s", err)
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
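A minimal sketch of how the pieces in this commit fit together on the server side, assuming processor is a TProcessor produced by the Thrift compiler and listenAddr is a placeholder address; serveThrift is a hypothetical helper built only from constructors that appear in this diff.

// Hypothetical sketch: wire a server socket, transport factory, and binary
// protocol factory into a blocking TSimpleServer.
func serveThrift(processor TProcessor, listenAddr string) error {
	serverTransport, err := NewTServerSocket(listenAddr)
	if err != nil {
		return err
	}
	server := NewTSimpleServer4(
		processor,
		serverTransport,
		NewTTransportFactory(),
		NewTBinaryProtocolFactoryDefault(),
	)
	// Serve blocks until Stop() interrupts the accept loop.
	return server.Serve()
}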
|
@ -0,0 +1,166 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TSocket struct {
|
||||
conn net.Conn
|
||||
addr net.Addr
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// NewTSocket creates a net.Conn-backed TTransport, given a host and port
|
||||
//
|
||||
// Example:
|
||||
// trans, err := thrift.NewTSocket("localhost:9090")
|
||||
func NewTSocket(hostPort string) (*TSocket, error) {
|
||||
return NewTSocketTimeout(hostPort, 0)
|
||||
}
|
||||
|
||||
// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port
|
||||
// it also accepts a timeout as a time.Duration
|
||||
func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) {
|
||||
//conn, err := net.DialTimeout(network, address, timeout)
|
||||
addr, err := net.ResolveTCPAddr("tcp", hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTSocketFromAddrTimeout(addr, timeout), nil
|
||||
}
|
||||
|
||||
// Creates a TSocket from a net.Addr
|
||||
func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket {
|
||||
return &TSocket{addr: addr, timeout: timeout}
|
||||
}
|
||||
|
||||
// Creates a TSocket from an existing net.Conn
|
||||
func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket {
|
||||
return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout}
|
||||
}
|
||||
|
||||
// Sets the socket timeout
|
||||
func (p *TSocket) SetTimeout(timeout time.Duration) error {
|
||||
p.timeout = timeout
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSocket) pushDeadline(read, write bool) {
|
||||
var t time.Time
|
||||
if p.timeout > 0 {
|
||||
t = time.Now().Add(time.Duration(p.timeout))
|
||||
}
|
||||
if read && write {
|
||||
p.conn.SetDeadline(t)
|
||||
} else if read {
|
||||
p.conn.SetReadDeadline(t)
|
||||
} else if write {
|
||||
p.conn.SetWriteDeadline(t)
|
||||
}
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSocket) Open() error {
|
||||
if p.IsOpen() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
|
||||
}
|
||||
if p.addr == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
|
||||
}
|
||||
if len(p.addr.Network()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
|
||||
}
|
||||
if len(p.addr.String()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
|
||||
}
|
||||
var err error
|
||||
if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the underlying net.Conn
|
||||
func (p *TSocket) Conn() net.Conn {
|
||||
return p.conn
|
||||
}
|
||||
|
||||
// Returns true if the connection is open
|
||||
func (p *TSocket) IsOpen() bool {
|
||||
if p.conn == nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Closes the socket.
|
||||
func (p *TSocket) Close() error {
|
||||
// Close the socket
|
||||
if p.conn != nil {
|
||||
err := p.conn.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//Returns the remote address of the socket.
|
||||
func (p *TSocket) Addr() net.Addr {
|
||||
return p.addr
|
||||
}
|
||||
|
||||
func (p *TSocket) Read(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(true, false)
|
||||
n, err := p.conn.Read(buf)
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TSocket) Write(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(false, true)
|
||||
return p.conn.Write(buf)
|
||||
}
|
||||
|
||||
func (p *TSocket) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSocket) Interrupt() error {
|
||||
if !p.IsOpen() {
|
||||
return nil
|
||||
}
|
||||
return p.conn.Close()
|
||||
}
|
||||
|
||||
func (p *TSocket) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
|
||||
|
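A minimal client-side sketch using only constructors that appear in this commit (NewTSocket and NewTBinaryProtocolFactoryDefault); dialThrift and hostPort are hypothetical names, not part of the vendored code.

// Hypothetical sketch: open a TCP connection and layer a binary protocol on it.
func dialThrift(hostPort string) (TProtocol, error) {
	trans, err := NewTSocket(hostPort)
	if err != nil {
		return nil, err
	}
	if err := trans.Open(); err != nil {
		return nil, err
	}
	// The socket satisfies TTransport, so it can back any protocol factory.
	prot := NewTBinaryProtocolFactoryDefault().GetProtocol(trans)
	return prot, nil
}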
109 vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go generated vendored Normal file
@ -0,0 +1,109 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
type TSSLServerSocket struct {
|
||||
listener net.Listener
|
||||
addr net.Addr
|
||||
clientTimeout time.Duration
|
||||
interrupted bool
|
||||
cfg *tls.Config
|
||||
}
|
||||
|
||||
func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
|
||||
return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
|
||||
}
|
||||
|
||||
func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
|
||||
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Listen() error {
|
||||
if p.IsListening() {
|
||||
return nil
|
||||
}
|
||||
l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.listener = l
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Accept() (TTransport, error) {
|
||||
if p.interrupted {
|
||||
return nil, errTransportInterrupted
|
||||
}
|
||||
if p.listener == nil {
|
||||
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
|
||||
}
|
||||
conn, err := p.listener.Accept()
|
||||
if err != nil {
|
||||
return nil, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
|
||||
}
|
||||
|
||||
// Checks whether the socket is listening.
|
||||
func (p *TSSLServerSocket) IsListening() bool {
|
||||
return p.listener != nil
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSSLServerSocket) Open() error {
|
||||
if p.IsListening() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
|
||||
}
|
||||
if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
|
||||
return err
|
||||
} else {
|
||||
p.listener = l
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Addr() net.Addr {
|
||||
return p.addr
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Close() error {
|
||||
defer func() {
|
||||
p.listener = nil
|
||||
}()
|
||||
if p.IsListening() {
|
||||
return p.listener.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Interrupt() error {
|
||||
p.interrupted = true
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,171 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TSSLSocket struct {
|
||||
conn net.Conn
|
||||
// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
|
||||
// only valid if addr is nil.
|
||||
hostPort string
|
||||
// addr is nil when hostPort is not "", and is only used when the
|
||||
// TSSLSocket is constructed from a net.Addr.
|
||||
addr net.Addr
|
||||
timeout time.Duration
|
||||
cfg *tls.Config
|
||||
}
|
||||
|
||||
// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration
|
||||
//
|
||||
// Example:
|
||||
// trans, err := thrift.NewTSSLSocket("localhost:9090", nil)
|
||||
func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
|
||||
return NewTSSLSocketTimeout(hostPort, cfg, 0)
|
||||
}
|
||||
|
||||
// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port
|
||||
// it also accepts a tls Configuration and a timeout as a time.Duration
|
||||
func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) {
|
||||
return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil
|
||||
}
|
||||
|
||||
// Creates a TSSLSocket from a net.Addr
|
||||
func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
|
||||
return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg}
|
||||
}
|
||||
|
||||
// Creates a TSSLSocket from an existing net.Conn
|
||||
func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
|
||||
return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg}
|
||||
}
|
||||
|
||||
// Sets the socket timeout
|
||||
func (p *TSSLSocket) SetTimeout(timeout time.Duration) error {
|
||||
p.timeout = timeout
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) pushDeadline(read, write bool) {
|
||||
var t time.Time
|
||||
if p.timeout > 0 {
|
||||
t = time.Now().Add(time.Duration(p.timeout))
|
||||
}
|
||||
if read && write {
|
||||
p.conn.SetDeadline(t)
|
||||
} else if read {
|
||||
p.conn.SetReadDeadline(t)
|
||||
} else if write {
|
||||
p.conn.SetWriteDeadline(t)
|
||||
}
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSSLSocket) Open() error {
|
||||
var err error
|
||||
// If we have a hostname, we need to pass the hostname to tls.Dial for
|
||||
// certificate hostname checks.
|
||||
if p.hostPort != "" {
|
||||
if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
} else {
|
||||
if p.IsOpen() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
|
||||
}
|
||||
if p.addr == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
|
||||
}
|
||||
if len(p.addr.Network()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
|
||||
}
|
||||
if len(p.addr.String()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
|
||||
}
|
||||
if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the underlying net.Conn
|
||||
func (p *TSSLSocket) Conn() net.Conn {
|
||||
return p.conn
|
||||
}
|
||||
|
||||
// Returns true if the connection is open
|
||||
func (p *TSSLSocket) IsOpen() bool {
|
||||
if p.conn == nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Closes the socket.
|
||||
func (p *TSSLSocket) Close() error {
|
||||
// Close the socket
|
||||
if p.conn != nil {
|
||||
err := p.conn.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Read(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(true, false)
|
||||
n, err := p.conn.Read(buf)
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Write(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(false, true)
|
||||
return p.conn.Write(buf)
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Interrupt() error {
|
||||
if !p.IsOpen() {
|
||||
return nil
|
||||
}
|
||||
return p.conn.Close()
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
|
||||
|
|
@@ -0,0 +1,68 @@
/* ... Apache License 2.0 header, identical to the one in the files above ... */

package thrift

import (
	"errors"
	"io"
)

var errTransportInterrupted = errors.New("Transport Interrupted")

type Flusher interface {
	Flush() (err error)
}

type ReadSizeProvider interface {
	RemainingBytes() (num_bytes uint64)
}

// Encapsulates the I/O layer
type TTransport interface {
	io.ReadWriteCloser
	Flusher
	ReadSizeProvider

	// Opens the transport for communication
	Open() error

	// Returns true if the transport is open
	IsOpen() bool
}

type stringWriter interface {
	WriteString(s string) (n int, err error)
}

// This is "enchanced" transport with extra capabilities. You need to use one of these
// to construct protocol.
// Notably, TSocket does not implement this interface, and it is always a mistake to use
// TSocket directly in protocol.
type TRichTransport interface {
	io.ReadWriter
	io.ByteReader
	io.ByteWriter
	stringWriter
	Flusher
	ReadSizeProvider
}
90 vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go generated vendored Normal file
@@ -0,0 +1,90 @@
/* ... Apache License 2.0 header, identical to the one in the files above ... */

package thrift

import (
	"errors"
	"io"
)

type timeoutable interface {
	Timeout() bool
}

// Thrift Transport exception
type TTransportException interface {
	TException
	TypeId() int
	Err() error
}

const (
	UNKNOWN_TRANSPORT_EXCEPTION = 0
	NOT_OPEN                    = 1
	ALREADY_OPEN                = 2
	TIMED_OUT                   = 3
	END_OF_FILE                 = 4
)

type tTransportException struct {
	typeId int
	err    error
}

func (p *tTransportException) TypeId() int {
	return p.typeId
}

func (p *tTransportException) Error() string {
	return p.err.Error()
}

func (p *tTransportException) Err() error {
	return p.err
}

func NewTTransportException(t int, e string) TTransportException {
	return &tTransportException{typeId: t, err: errors.New(e)}
}

func NewTTransportExceptionFromError(e error) TTransportException {
	if e == nil {
		return nil
	}

	if t, ok := e.(TTransportException); ok {
		return t
	}

	switch v := e.(type) {
	case TTransportException:
		return v
	case timeoutable:
		if v.Timeout() {
			return &tTransportException{typeId: TIMED_OUT, err: e}
		}
	}

	if e == io.EOF {
		return &tTransportException{typeId: END_OF_FILE, err: e}
	}

	return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e}
}
39 vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go generated vendored Normal file
@@ -0,0 +1,39 @@
/* ... Apache License 2.0 header, identical to the one in the files above ... */

package thrift

// Factory class used to create wrapped instance of Transports.
// This is used primarily in servers, which get Transports from
// a ServerTransport and then may want to mutate them (i.e. create
// a BufferedTransport from the underlying base transport)
type TTransportFactory interface {
	GetTransport(trans TTransport) TTransport
}

type tTransportFactory struct{}

// Return a wrapped instance of the base Transport.
func (p *tTransportFactory) GetTransport(trans TTransport) TTransport {
	return trans
}

func NewTTransportFactory() TTransportFactory {
	return &tTransportFactory{}
}
@@ -0,0 +1,68 @@
/* ... Apache License 2.0 header, identical to the one in the files above ... */

package thrift

// Type constants in the Thrift protocol
type TType byte

const (
	STOP   = 0
	VOID   = 1
	BOOL   = 2
	BYTE   = 3
	I08    = 3
	DOUBLE = 4
	I16    = 6
	I32    = 8
	I64    = 10
	STRING = 11
	UTF7   = 11
	STRUCT = 12
	MAP    = 13
	SET    = 14
	LIST   = 15
	UTF8   = 16
	UTF16  = 17
	BINARY = 18
)

var typeNames = map[int]string{
	STOP:   "STOP",
	VOID:   "VOID",
	BOOL:   "BOOL",
	BYTE:   "BYTE",
	I16:    "I16",
	I32:    "I32",
	I64:    "I64",
	STRING: "STRING",
	STRUCT: "STRUCT",
	MAP:    "MAP",
	SET:    "SET",
	LIST:   "LIST",
	UTF8:   "UTF8",
	UTF16:  "UTF16",
}

func (p TType) String() string {
	if s, ok := typeNames[int(p)]; ok {
		return s
	}
	return "Unknown"
}
@ -0,0 +1,117 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
// TZlibTransportFactory is a factory for TZlibTransport instances
|
||||
type TZlibTransportFactory struct {
|
||||
level int
|
||||
}
|
||||
|
||||
// TZlibTransport is a TTransport implementation that makes use of zlib compression.
|
||||
type TZlibTransport struct {
|
||||
reader io.ReadCloser
|
||||
transport TTransport
|
||||
writer *zlib.Writer
|
||||
}
|
||||
|
||||
// GetTransport constructs a new instance of NewTZlibTransport
|
||||
func (p *TZlibTransportFactory) GetTransport(trans TTransport) TTransport {
|
||||
t, _ := NewTZlibTransport(trans, p.level)
|
||||
return t
|
||||
}
|
||||
|
||||
// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory
|
||||
func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
|
||||
return &TZlibTransportFactory{level: level}
|
||||
}
|
||||
|
||||
// NewTZlibTransport constructs a new instance of TZlibTransport
|
||||
func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
|
||||
w, err := zlib.NewWriterLevel(trans, level)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &TZlibTransport{
|
||||
writer: w,
|
||||
transport: trans,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the reader and writer (flushing any unwritten data) and closes
|
||||
// the underlying transport.
|
||||
func (z *TZlibTransport) Close() error {
|
||||
if z.reader != nil {
|
||||
if err := z.reader.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := z.writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.transport.Close()
|
||||
}
|
||||
|
||||
// Flush flushes the writer and its underlying transport.
|
||||
func (z *TZlibTransport) Flush() error {
|
||||
if err := z.writer.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.transport.Flush()
|
||||
}
|
||||
|
||||
// IsOpen returns true if the transport is open
|
||||
func (z *TZlibTransport) IsOpen() bool {
|
||||
return z.transport.IsOpen()
|
||||
}
|
||||
|
||||
// Open opens the transport for communication
|
||||
func (z *TZlibTransport) Open() error {
|
||||
return z.transport.Open()
|
||||
}
|
||||
|
||||
func (z *TZlibTransport) Read(p []byte) (int, error) {
|
||||
if z.reader == nil {
|
||||
r, err := zlib.NewReader(z.transport)
|
||||
if err != nil {
|
||||
return 0, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
z.reader = r
|
||||
}
|
||||
|
||||
return z.reader.Read(p)
|
||||
}
|
||||
|
||||
// RemainingBytes returns the size in bytes of the data that is still to be
|
||||
// read.
|
||||
func (z *TZlibTransport) RemainingBytes() uint64 {
|
||||
return z.transport.RemainingBytes()
|
||||
}
|
||||
|
||||
func (z *TZlibTransport) Write(p []byte) (int, error) {
|
||||
return z.writer.Write(p)
|
||||
}
|
|
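A hedged sketch of wrapping an existing transport with the zlib factory above; wrapWithZlib is a hypothetical name, and zlib.BestCompression comes from the standard compress/zlib package already imported by this file.

// Hypothetical sketch: compress all traffic on an existing transport.
func wrapWithZlib(trans TTransport) TTransport {
	// NewTZlibTransportFactory stores the compression level; GetTransport
	// builds a compressing wrapper around trans.
	factory := NewTZlibTransportFactory(zlib.BestCompression)
	return factory.GetTransport(trans)
}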
@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Load Diff
|
@ -0,0 +1,316 @@
|
|||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
	// Convert map to slice to avoid slow iterations on a map.
	// ƒ is called on the hot path, so converting the map to a slice
	// beforehand results in significant CPU savings.
	targets := targetMapToSlice(targetMap)

	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for _, t := range targets {
			if t.quantile*s.n <= r {
				f = (2 * t.epsilon * r) / t.quantile
			} else {
				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

type target struct {
	quantile float64
	epsilon  float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
	targets := make([]target, 0, len(targetMap))

	for quantile, epsilon := range targetMap {
		t := target{
			quantile: quantile,
			epsilon:  epsilon,
		}
		targets = append(targets, t)
	}

	return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
@ -0,0 +1,8 @@
language: go
go:
  - "1.x"
  - master
env:
  - TAGS=""
  - TAGS="-tags purego"
script: go test $TAGS -v ./...
@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -0,0 +1,67 @@
# xxhash

[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
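
As a rough usage sketch (assuming only the API listed above and the `github.com/cespare/xxhash/v2` module path declared in go.mod below), a caller can hash data either in one shot or incrementally; both calls here print the same value:

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Println(xxhash.Sum64([]byte("hello world")))

	// Streaming: Digest implements hash.Hash64, so data can be written
	// incrementally and hashed at the end.
	d := xxhash.New()
	d.WriteString("hello ")
	d.Write([]byte("world"))
	fmt.Println(d.Sum64())
}
```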

This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [FreeCache](https://github.com/coocood/freecache)
@ -0,0 +1,3 @@
module github.com/cespare/xxhash/v2

go 1.11
@ -0,0 +1,236 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash

import (
	"encoding/binary"
	"errors"
	"math/bits"
)

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
	prime4 uint64 = 9650029242287828579
	prime5 uint64 = 2870177450012600261
)

// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
	prime1v = prime1
	prime2v = prime2
	prime3v = prime3
	prime4v = prime4
	prime5v = prime5
)

// Digest implements hash.Hash64.
type Digest struct {
	v1    uint64
	v2    uint64
	v3    uint64
	v4    uint64
	total uint64
	mem   [32]byte
	n     int // how much of mem is used
}

// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
	var d Digest
	d.Reset()
	return &d
}

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
	d.v1 = prime1v + prime2
	d.v2 = prime2
	d.v3 = 0
	d.v4 = -prime1v
	d.total = 0
	d.n = 0
}

// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }

// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }

// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)

	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
		copy(d.mem[d.n:], b)
		d.n += n
		return
	}

	if d.n > 0 {
		// Finish off the partial block.
		copy(d.mem[d.n:], b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
		b = b[32-d.n:]
		d.n = 0
	}

	if len(b) >= 32 {
		// One or more full blocks left.
		nw := writeBlocks(d, b)
		b = b[nw:]
	}

	// Store any remaining partial block.
	copy(d.mem[:], b)
	d.n = len(b)

	return
}

// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
	s := d.Sum64()
	return append(
		b,
		byte(s>>56),
		byte(s>>48),
		byte(s>>40),
		byte(s>>32),
		byte(s>>24),
		byte(s>>16),
		byte(s>>8),
		byte(s),
	)
}

// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
	var h uint64

	if d.total >= 32 {
		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = d.v3 + prime5
	}

	h += d.total

	i, end := 0, d.n
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(d.mem[i:i+8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(d.mem[i:i+4])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for i < end {
		h ^= uint64(d.mem[i]) * prime5
		h = rol11(h) * prime1
		i++
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

const (
	magic         = "xxh\x06"
	marshaledSize = len(magic) + 8*5 + 32
)

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaledSize)
	b = append(b, magic...)
	b = appendUint64(b, d.v1)
	b = appendUint64(b, d.v2)
	b = appendUint64(b, d.v3)
	b = appendUint64(b, d.v4)
	b = appendUint64(b, d.total)
	b = append(b, d.mem[:d.n]...)
	b = b[:len(b)+len(d.mem)-d.n]
	return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.v1 = consumeUint64(b)
	b, d.v2 = consumeUint64(b)
	b, d.v3 = consumeUint64(b)
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	b = b[len(d.mem):]
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}

func appendUint64(b []byte, x uint64) []byte {
	var a [8]byte
	binary.LittleEndian.PutUint64(a[:], x)
	return append(b, a[:]...)
}

func consumeUint64(b []byte) ([]byte, uint64) {
	x := u64(b)
	return b[8:], x
}

func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }

func round(acc, input uint64) uint64 {
	acc += input * prime2
	acc = rol31(acc)
	acc *= prime1
	return acc
}

func mergeRound(acc, val uint64) uint64 {
	val = round(0, val)
	acc ^= val
	acc = acc*prime1 + prime4
	return acc
}

func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
@ -0,0 +1,13 @@
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int
@ -0,0 +1,215 @@
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Register allocation:
// AX	h
// CX	pointer to advance through b
// DX	n
// BX	loop end
// R8	v1, k1
// R9	v2
// R10	v3
// R11	v4
// R12	tmp
// R13	prime1v
// R14	prime2v
// R15	prime4v

// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
	MOVQ  (CX), R12 \
	ADDQ  $8, CX    \
	IMULQ R14, R12  \
	ADDQ  R12, r    \
	ROLQ  $31, r    \
	IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
	IMULQ R14, val \
	ROLQ  $31, val \
	IMULQ R13, val \
	XORQ  val, acc \
	IMULQ R13, acc \
	ADDQ  R15, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
	// Load fixed primes.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·prime4v(SB), R15

	// Load slice.
	MOVQ b_base+0(FP), CX
	MOVQ b_len+8(FP), DX
	LEAQ (CX)(DX*1), BX

	// The first loop limit will be len(b)-32.
	SUBQ $32, BX

	// Check whether we have at least one block.
	CMPQ DX, $32
	JLT  noBlocks

	// Set up initial state (v1, v2, v3, v4).
	MOVQ R13, R8
	ADDQ R14, R8
	MOVQ R14, R9
	XORQ R10, R10
	XORQ R11, R11
	SUBQ R13, R11

	// Loop until CX > BX.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ CX, BX
	JLE  blockLoop

	MOVQ R8, AX
	ROLQ $1, AX
	MOVQ R9, R12
	ROLQ $7, R12
	ADDQ R12, AX
	MOVQ R10, R12
	ROLQ $12, R12
	ADDQ R12, AX
	MOVQ R11, R12
	ROLQ $18, R12
	ADDQ R12, AX

	mergeRound(AX, R8)
	mergeRound(AX, R9)
	mergeRound(AX, R10)
	mergeRound(AX, R11)

	JMP afterBlocks

noBlocks:
	MOVQ ·prime5v(SB), AX

afterBlocks:
	ADDQ DX, AX

	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
	ADDQ $24, BX

	CMPQ CX, BX
	JG   fourByte

wordLoop:
	// Calculate k1.
	MOVQ  (CX), R8
	ADDQ  $8, CX
	IMULQ R14, R8
	ROLQ  $31, R8
	IMULQ R13, R8

	XORQ  R8, AX
	ROLQ  $27, AX
	IMULQ R13, AX
	ADDQ  R15, AX

	CMPQ CX, BX
	JLE  wordLoop

fourByte:
	ADDQ $4, BX
	CMPQ CX, BX
	JG   singles

	MOVL  (CX), R8
	ADDQ  $4, CX
	IMULQ R13, R8
	XORQ  R8, AX

	ROLQ  $23, AX
	IMULQ R14, AX
	ADDQ  ·prime3v(SB), AX

singles:
	ADDQ $4, BX
	CMPQ CX, BX
	JGE  finalize

singlesLoop:
	MOVBQZX (CX), R12
	ADDQ    $1, CX
	IMULQ   ·prime5v(SB), R12
	XORQ    R12, AX

	ROLQ  $11, AX
	IMULQ R13, AX

	CMPQ CX, BX
	JL   singlesLoop

finalize:
	MOVQ  AX, R12
	SHRQ  $33, R12
	XORQ  R12, AX
	IMULQ R14, AX
	MOVQ  AX, R12
	SHRQ  $29, R12
	XORQ  R12, AX
	IMULQ ·prime3v(SB), AX
	MOVQ  AX, R12
	SHRQ  $32, R12
	XORQ  R12, AX

	MOVQ AX, ret+24(FP)
	RET

// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
	// Load fixed primes needed for round.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14

	// Load slice.
	MOVQ b_base+8(FP), CX
	MOVQ b_len+16(FP), DX
	LEAQ (CX)(DX*1), BX
	SUBQ $32, BX

	// Load vN from d.
	MOVQ d+0(FP), AX
	MOVQ 0(AX), R8   // v1
	MOVQ 8(AX), R9   // v2
	MOVQ 16(AX), R10 // v3
	MOVQ 24(AX), R11 // v4

	// We don't need to check the loop condition here; this function is
	// always called with at least one block of data to process.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ CX, BX
	JLE  blockLoop

	// Copy vN back to d.
	MOVQ R8, 0(AX)
	MOVQ R9, 8(AX)
	MOVQ R10, 16(AX)
	MOVQ R11, 24(AX)

	// The number of bytes written is CX minus the old base pointer.
	SUBQ b_base+8(FP), CX
	MOVQ CX, ret+32(FP)

	RET
@ -0,0 +1,76 @@
// +build !amd64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   d := New()
	//   d.Write(b)
	//   return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		v1 := prime1v + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -prime1v
		for len(b) >= 32 {
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = prime5
	}

	h += uint64(n)

	i, end := 0, len(b)
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(b[i:i+8:len(b)]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for ; i < end; i++ {
		h ^= uint64(b[i]) * prime5
		h = rol11(h) * prime1
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

func writeBlocks(d *Digest, b []byte) int {
	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
	n := len(b)
	for len(b) >= 32 {
		v1 = round(v1, u64(b[0:8:len(b)]))
		v2 = round(v2, u64(b[8:16:len(b)]))
		v3 = round(v3, u64(b[16:24:len(b)]))
		v4 = round(v4, u64(b[24:32:len(b)]))
		b = b[32:len(b):len(b)]
	}
	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
	return n - len(b)
}
@ -0,0 +1,15 @@
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
	return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
	return d.Write([]byte(s))
}
@ -0,0 +1,46 @@
// +build !appengine

// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.

package xxhash

import (
	"reflect"
	"unsafe"
)

// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return d.Write(b)
}
vendor/k8s.io/apimachinery/LICENSE → vendor/github.com/coreos/go-semver/LICENSE
@ -0,0 +1,5 @@
CoreOS Project
Copyright 2018 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
@ -0,0 +1,296 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Semantic Versions http://semver.org
package semver

import (
	"bytes"
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

type Version struct {
	Major      int64
	Minor      int64
	Patch      int64
	PreRelease PreRelease
	Metadata   string
}

type PreRelease string

func splitOff(input *string, delim string) (val string) {
	parts := strings.SplitN(*input, delim, 2)

	if len(parts) == 2 {
		*input = parts[0]
		val = parts[1]
	}

	return val
}

func New(version string) *Version {
	return Must(NewVersion(version))
}

func NewVersion(version string) (*Version, error) {
	v := Version{}

	if err := v.Set(version); err != nil {
		return nil, err
	}

	return &v, nil
}

// Must is a helper for wrapping NewVersion and will panic if err is not nil.
func Must(v *Version, err error) *Version {
	if err != nil {
		panic(err)
	}
	return v
}

// Set parses and updates v from the given version string. Implements flag.Value
func (v *Version) Set(version string) error {
	metadata := splitOff(&version, "+")
	preRelease := PreRelease(splitOff(&version, "-"))
	dotParts := strings.SplitN(version, ".", 3)

	if len(dotParts) != 3 {
		return fmt.Errorf("%s is not in dotted-tri format", version)
	}

	if err := validateIdentifier(string(preRelease)); err != nil {
		return fmt.Errorf("failed to validate pre-release: %v", err)
	}

	if err := validateIdentifier(metadata); err != nil {
		return fmt.Errorf("failed to validate metadata: %v", err)
	}

	parsed := make([]int64, 3, 3)

	for i, v := range dotParts[:3] {
		val, err := strconv.ParseInt(v, 10, 64)
		parsed[i] = val
		if err != nil {
			return err
		}
	}

	v.Metadata = metadata
	v.PreRelease = preRelease
	v.Major = parsed[0]
	v.Minor = parsed[1]
	v.Patch = parsed[2]
	return nil
}

func (v Version) String() string {
	var buffer bytes.Buffer

	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)

	if v.PreRelease != "" {
		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
	}

	if v.Metadata != "" {
		fmt.Fprintf(&buffer, "+%s", v.Metadata)
	}

	return buffer.String()
}

func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var data string
	if err := unmarshal(&data); err != nil {
		return err
	}
	return v.Set(data)
}

func (v Version) MarshalJSON() ([]byte, error) {
	return []byte(`"` + v.String() + `"`), nil
}

func (v *Version) UnmarshalJSON(data []byte) error {
	l := len(data)
	if l == 0 || string(data) == `""` {
		return nil
	}
	if l < 2 || data[0] != '"' || data[l-1] != '"' {
		return errors.New("invalid semver string")
	}
	return v.Set(string(data[1 : l-1]))
}

// Compare tests if v is less than, equal to, or greater than versionB,
// returning -1, 0, or +1 respectively.
func (v Version) Compare(versionB Version) int {
	if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
		return cmp
	}
	return preReleaseCompare(v, versionB)
}

// Equal tests if v is equal to versionB.
func (v Version) Equal(versionB Version) bool {
	return v.Compare(versionB) == 0
}

// LessThan tests if v is less than versionB.
func (v Version) LessThan(versionB Version) bool {
	return v.Compare(versionB) < 0
}

// Slice converts the comparable parts of the semver into a slice of integers.
func (v Version) Slice() []int64 {
	return []int64{v.Major, v.Minor, v.Patch}
}

func (p PreRelease) Slice() []string {
	preRelease := string(p)
	return strings.Split(preRelease, ".")
}

func preReleaseCompare(versionA Version, versionB Version) int {
	a := versionA.PreRelease
	b := versionB.PreRelease

	/* Handle the case where if two versions are otherwise equal it is the
	 * one without a PreRelease that is greater */
	if len(a) == 0 && (len(b) > 0) {
		return 1
	} else if len(b) == 0 && (len(a) > 0) {
		return -1
	}

	// If there is a prerelease, check and compare each part.
	return recursivePreReleaseCompare(a.Slice(), b.Slice())
}

func recursiveCompare(versionA []int64, versionB []int64) int {
	if len(versionA) == 0 {
		return 0
	}

	a := versionA[0]
	b := versionB[0]

	if a > b {
		return 1
	} else if a < b {
		return -1
	}

	return recursiveCompare(versionA[1:], versionB[1:])
}

func recursivePreReleaseCompare(versionA []string, versionB []string) int {
	// A larger set of pre-release fields has a higher precedence than a smaller set,
	// if all of the preceding identifiers are equal.
	if len(versionA) == 0 {
		if len(versionB) > 0 {
			return -1
		}
		return 0
	} else if len(versionB) == 0 {
		// We're longer than versionB so return 1.
		return 1
	}

	a := versionA[0]
	b := versionB[0]

	aInt := false
	bInt := false

	aI, err := strconv.Atoi(versionA[0])
	if err == nil {
		aInt = true
	}

	bI, err := strconv.Atoi(versionB[0])
	if err == nil {
		bInt = true
	}

	// Numeric identifiers always have lower precedence than non-numeric identifiers.
	if aInt && !bInt {
		return -1
	} else if !aInt && bInt {
		return 1
	}

	// Handle Integer Comparison
	if aInt && bInt {
		if aI > bI {
			return 1
		} else if aI < bI {
			return -1
		}
	}

	// Handle String Comparison
	if a > b {
		return 1
	} else if a < b {
		return -1
	}

	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
}

// BumpMajor increments the Major field by 1 and resets all other fields to their default values
func (v *Version) BumpMajor() {
	v.Major += 1
	v.Minor = 0
	v.Patch = 0
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
func (v *Version) BumpMinor() {
	v.Minor += 1
	v.Patch = 0
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
func (v *Version) BumpPatch() {
	v.Patch += 1
	v.PreRelease = PreRelease("")
	v.Metadata = ""
}

// validateIdentifier makes sure the provided identifier satisfies semver spec
func validateIdentifier(id string) error {
	if id != "" && !reIdentifier.MatchString(id) {
		return fmt.Errorf("%s is not a valid semver identifier", id)
	}
	return nil
}

// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
@ -0,0 +1,38 @@
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package semver

import (
	"sort"
)

type Versions []*Version

func (s Versions) Len() int {
	return len(s)
}

func (s Versions) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s Versions) Less(i, j int) bool {
	return s[i].LessThan(*s[j])
}

// Sort sorts the given slice of Version
func Sort(versions []*Version) {
	sort.Sort(Versions(versions))
}
@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.

"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.

2. Grant of Copyright License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.

3. Grant of Patent License.

Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.

4. Redistribution.

You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:

You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.

5. Submission of Contributions.

Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.

6. Trademarks.

This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.

7. Disclaimer of Warranty.

Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.

8. Limitation of Liability.

In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability.

While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work

To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -0,0 +1,5 @@
CoreOS Project
Copyright 2018 CoreOS, Inc

This product includes software developed at CoreOS, Inc.
(http://www.coreos.com/).
@ -0,0 +1,225 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package journal provides write bindings to the local systemd journal.
// It is implemented in pure Go and connects to the journal directly over its
// unix socket.
//
// To read from the journal, see the "sdjournal" package, which wraps the
// sd-journal a C API.
//
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
package journal

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"unsafe"
)

// Priority of a journal message
type Priority int

const (
	PriEmerg Priority = iota
	PriAlert
	PriCrit
	PriErr
	PriWarning
	PriNotice
	PriInfo
	PriDebug
)

var (
	// This can be overridden at build-time:
	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
	journalSocket = "/run/systemd/journal/socket"

	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
	// Concrete safe pointer type: *net.UnixConn
	unixConnPtr unsafe.Pointer
	// onceConn ensures that unixConnPtr is initialized exactly once.
	onceConn sync.Once
)

func init() {
	onceConn.Do(initConn)
}

// Enabled checks whether the local systemd journal is available for logging.
func Enabled() bool {
	onceConn.Do(initConn)

	if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
		return false
	}

	if _, err := net.Dial("unixgram", journalSocket); err != nil {
		return false
	}

	return true
}

// Send a message to the local systemd journal. vars is a map of journald
// fields to values. Fields must be composed of uppercase letters, numbers,
// and underscores, but must not start with an underscore. Within these
// restrictions, any arbitrary field name may be used. Some names have special
// significance: see the journalctl documentation
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
// for more details. vars may be nil.
func Send(message string, priority Priority, vars map[string]string) error {
	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
	if conn == nil {
		return errors.New("could not initialize socket to journald")
	}

	socketAddr := &net.UnixAddr{
		Name: journalSocket,
		Net:  "unixgram",
	}

	data := new(bytes.Buffer)
	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
	appendVariable(data, "MESSAGE", message)
	for k, v := range vars {
		appendVariable(data, k, v)
	}

	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
	if err == nil {
		return nil
	}
	if !isSocketSpaceError(err) {
		return err
	}

	// Large log entry, send it via tempfile and ancillary-fd.
	file, err := tempFd()
	if err != nil {
		return err
	}
	defer file.Close()
	_, err = io.Copy(file, data)
	if err != nil {
		return err
	}
	rights := syscall.UnixRights(int(file.Fd()))
	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
	if err != nil {
		return err
	}

	return nil
}

// Print prints a message to the local systemd journal using Send().
func Print(priority Priority, format string, a ...interface{}) error {
	return Send(fmt.Sprintf(format, a...), priority, nil)
}

func appendVariable(w io.Writer, name, value string) {
	if err := validVarName(name); err != nil {
		fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
	}
	if strings.ContainsRune(value, '\n') {
		/* When the value contains a newline, we write:
		 * - the variable name, followed by a newline
		 * - the size (in 64bit little endian format)
		 * - the data, followed by a newline
		 */
		fmt.Fprintln(w, name)
		binary.Write(w, binary.LittleEndian, uint64(len(value)))
		fmt.Fprintln(w, value)
	} else {
		/* just write the variable and value all on one line */
		fmt.Fprintf(w, "%s=%s\n", name, value)
	}
}

// validVarName validates a variable name to make sure journald will accept it.
// The variable name must be in uppercase and consist only of characters,
// numbers and underscores, and may not begin with an underscore:
// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
func validVarName(name string) error {
	if name == "" {
		return errors.New("Empty variable name")
	} else if name[0] == '_' {
		return errors.New("Variable name begins with an underscore")
	}

	for _, c := range name {
		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
			return errors.New("Variable name contains invalid characters")
		}
	}
	return nil
}

// isSocketSpaceError checks whether the error is signaling
// an "overlarge message" condition.
func isSocketSpaceError(err error) bool {
	opErr, ok := err.(*net.OpError)
	if !ok || opErr == nil {
		return false
	}

	sysErr, ok := opErr.Err.(*os.SyscallError)
	if !ok || sysErr == nil {
		return false
	}

	return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
}

// tempFd creates a temporary, unlinked file under `/dev/shm`.
func tempFd() (*os.File, error) {
	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
	if err != nil {
		return nil, err
	}
	err = syscall.Unlink(file.Name())
	if err != nil {
		return nil, err
	}
	return file, nil
}

// initConn initializes the global `unixConnPtr` socket.
// It is meant to be called exactly once, at program startup.
func initConn() {
	autobind, err := net.ResolveUnixAddr("unixgram", "")
	if err != nil {
		return
	}

	sock, err := net.ListenUnixgram("unixgram", autobind)
	if err != nil {
		return
	}

	atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
}
@ -0,0 +1,202 @@
|
|||
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.