diff --git a/Gopkg.lock b/Gopkg.lock index 91a633c..68b5495 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -9,6 +9,14 @@ revision = "b26d9c308763d68093482582cea63d69be07a0f0" version = "v0.3.0" +[[projects]] + branch = "master" + digest = "1:867b9f2be08ff92c487e1b653d18a879a811aae72404fff0fd8de752adaf6c39" + name = "github.com/antchfx/xpath" + packages = ["."] + pruneopts = "NUT" + revision = "3de91f3991a1af6e495d49c9218318b5544b20e3" + [[projects]] digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" name = "github.com/davecgh/go-spew" @@ -37,7 +45,7 @@ version = "v1.4.0" [[projects]] - digest = "1:1ba798075c507fb2513d955f8c04314acdb233b269671224a602ed4dc550eb64" + digest = "1:983cb7d70588854e2706e32c61698bbf0eab4956fca89e86c1c10d7213679a0e" name = "github.com/gogo/protobuf" packages = [ "gogoproto", @@ -47,8 +55,8 @@ "types", ] pruneopts = "NUT" - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" [[projects]] digest = "1:03e14cff610a8a58b774e36bd337fa979482be86aab01be81fb8bbd6d0f07fc8" @@ -215,7 +223,7 @@ revision = "ff3583edef7de132f219f0efc00e097cabcc0ec0" [[projects]] - digest = "1:d0e5846da58e4e20d1bbd7502b24f31d37e3ac1e02a9042d95bd93c2d0c139dd" + digest = "1:5b805b8e03b29399b344655cac16873f026e54dc0a7c17b381f6f4d4c7b6d741" name = "google.golang.org/grpc" packages = [ ".", @@ -231,7 +239,9 @@ "internal", "internal/backoff", "internal/channelz", + "internal/envconfig", "internal/grpcrand", + "internal/transport", "keepalive", "metadata", "naming", @@ -242,11 +252,10 @@ "stats", "status", "tap", - "transport", ] pruneopts = "NUT" - revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8" - version = "v1.13.0" + revision = "8dea3dc473e90c8179e519d91302d0597c0ca1d1" + version = "v1.15.0" [[projects]] digest = "1:4fe5671ee9615984167403a07ea67e0cbb7396f3ab335294ef767ac79b972ffc" @@ -257,15 +266,15 @@ version = "v1.3.0" [[projects]] - digest = "1:c2d45df8b6f2ae9c80f660ba4b634a39b02fdd4e520f557a75ca5db6d9aa579d" + digest = "1:ace997f8ef569276114f7a4e2a8b44cc8ee34b4d756ccbeb73f612589d98b0b3" name = "gopkg.in/bblfsh/client-go.v2" packages = [ ".", "tools", ] pruneopts = "UT" - revision = "78d31dfeb56c06d98fd56c0da270559e8002968d" - version = "v2.6.1" + revision = "47a750b0793f5094635bd013001c435bacc6ac48" + version = "v2.8.8" [[projects]] digest = "1:85ce4b06f191dd616ad7f4377843ea08db7db003757bd0a67e1b3ac4cbd5c2ef" @@ -279,6 +288,27 @@ revision = "96e92bdde3785e655b8095a576addd52b96c6c61" version = "v1.16.0" +[[projects]] + digest = "1:c86f8b8c3a3b44c31190b0a70d1adbab4fc264a2245ee893e572658bcd40dead" + name = "gopkg.in/bblfsh/sdk.v2" + packages = [ + "driver", + "driver/manifest", + "protocol", + "protocol/v1", + "uast", + "uast/nodes", + "uast/nodes/nodesproto", + "uast/nodes/nodesproto/pio", + "uast/query", + "uast/query/xpath", + "uast/role", + "uast/transformer", + ] + pruneopts = "NUT" + revision = "85fd47713ba40fc3bffe4214090a552074841431" + version = "v2.2.3" + [[projects]] digest = "1:d11329b27b0f34c399185531f3232493d7da40f576574da04776812b0a94f66b" name = "gopkg.in/src-d/enry.v1" diff --git a/Gopkg.toml b/Gopkg.toml index 46892dd..dcb04f6 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -44,7 +44,7 @@ [[constraint]] name = "gopkg.in/bblfsh/client-go.v2" - version = "2.6.1" + version = "2.8.4" [[constraint]] name = "gopkg.in/DATA-DOG/go-sqlmock.v1" diff --git a/server/handler/uast.go b/server/handler/uast.go 
index dd870e6..779f377 100644 --- a/server/handler/uast.go +++ b/server/handler/uast.go @@ -52,13 +52,10 @@ func Parse(bbblfshServerURL string) RequestProcessFunc { Language(req.Language). Filename(req.Filename). Content(req.Content). + Mode(bblfsh.Semantic). Do() if err != nil { - if bblfshErr, ok := err.(bblfsh.FatalError); ok { - return nil, serializer.NewHTTPError(http.StatusBadRequest, bblfshErr.Error()) - } - - return nil, err + return nil, serializer.NewHTTPError(http.StatusBadRequest, err.Error()) } if resp.Status == protocol.Error { diff --git a/vendor/github.com/antchfx/xpath/LICENSE b/vendor/github.com/antchfx/xpath/LICENSE new file mode 100644 index 0000000..e14c371 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/LICENSE @@ -0,0 +1,17 @@ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/antchfx/xpath/build.go b/vendor/github.com/antchfx/xpath/build.go new file mode 100644 index 0000000..74f266b --- /dev/null +++ b/vendor/github.com/antchfx/xpath/build.go @@ -0,0 +1,483 @@ +package xpath + +import ( + "errors" + "fmt" +) + +type flag int + +const ( + noneFlag flag = iota + filterFlag +) + +// builder provides building an XPath expressions. +type builder struct { + depth int + flag flag + firstInput query +} + +// axisPredicate creates a predicate to predicating for this axis node. +func axisPredicate(root *axisNode) func(NodeNavigator) bool { + // get current axix node type. + typ := ElementNode + switch root.AxeType { + case "attribute": + typ = AttributeNode + case "self", "parent": + typ = allNode + default: + switch root.Prop { + case "comment": + typ = CommentNode + case "text": + typ = TextNode + // case "processing-instruction": + // typ = ProcessingInstructionNode + case "node": + typ = allNode + } + } + nametest := root.LocalName != "" || root.Prefix != "" + predicate := func(n NodeNavigator) bool { + if typ == n.NodeType() || typ == allNode || typ == TextNode { + if nametest { + if root.LocalName == n.LocalName() && root.Prefix == n.Prefix() { + return true + } + } else { + return true + } + } + return false + } + + return predicate +} + +// processAxisNode processes a query for the XPath axis node. 
+func (b *builder) processAxisNode(root *axisNode) (query, error) { + var ( + err error + qyInput query + qyOutput query + predicate = axisPredicate(root) + ) + + if root.Input == nil { + qyInput = &contextQuery{} + } else { + if root.AxeType == "child" && (root.Input.Type() == nodeAxis) { + if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" { + var qyGrandInput query + if input.Input != nil { + qyGrandInput, _ = b.processNode(input.Input) + } else { + qyGrandInput = &contextQuery{} + } + qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true} + return qyOutput, nil + } + } + qyInput, err = b.processNode(root.Input) + if err != nil { + return nil, err + } + } + + switch root.AxeType { + case "ancestor": + qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate} + case "ancestor-or-self": + qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate, Self: true} + case "attribute": + qyOutput = &attributeQuery{Input: qyInput, Predicate: predicate} + case "child": + filter := func(n NodeNavigator) bool { + v := predicate(n) + switch root.Prop { + case "text": + v = v && n.NodeType() == TextNode + case "node": + v = v && (n.NodeType() == ElementNode || n.NodeType() == TextNode) + case "comment": + v = v && n.NodeType() == CommentNode + } + return v + } + qyOutput = &childQuery{Input: qyInput, Predicate: filter} + case "descendant": + qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate} + case "descendant-or-self": + qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate, Self: true} + case "following": + qyOutput = &followingQuery{Input: qyInput, Predicate: predicate} + case "following-sibling": + qyOutput = &followingQuery{Input: qyInput, Predicate: predicate, Sibling: true} + case "parent": + qyOutput = &parentQuery{Input: qyInput, Predicate: predicate} + case "preceding": + qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate} + case "preceding-sibling": + qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate, Sibling: true} + case "self": + qyOutput = &selfQuery{Input: qyInput, Predicate: predicate} + case "namespace": + // haha,what will you do someting?? + default: + err = fmt.Errorf("unknown axe type: %s", root.AxeType) + return nil, err + } + return qyOutput, nil +} + +// processFilterNode builds query for the XPath filter predicate. +func (b *builder) processFilterNode(root *filterNode) (query, error) { + b.flag |= filterFlag + + qyInput, err := b.processNode(root.Input) + if err != nil { + return nil, err + } + qyCond, err := b.processNode(root.Condition) + if err != nil { + return nil, err + } + qyOutput := &filterQuery{Input: qyInput, Predicate: qyCond} + return qyOutput, nil +} + +// processFunctionNode processes query for the XPath function node. 
+func (b *builder) processFunctionNode(root *functionNode) (query, error) { + var qyOutput query + switch root.FuncName { + case "starts-with": + arg1, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1]) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Input: b.firstInput, Func: startwithFunc(arg1, arg2)} + case "ends-with": + arg1, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1]) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Input: b.firstInput, Func: endwithFunc(arg1, arg2)} + case "contains": + arg1, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + arg2, err := b.processNode(root.Args[1]) + if err != nil { + return nil, err + } + + qyOutput = &functionQuery{Input: b.firstInput, Func: containsFunc(arg1, arg2)} + case "substring": + //substring( string , start [, length] ) + if len(root.Args) < 2 { + return nil, errors.New("xpath: substring function must have at least two parameter") + } + var ( + arg1, arg2, arg3 query + err error + ) + if arg1, err = b.processNode(root.Args[0]); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1]); err != nil { + return nil, err + } + if len(root.Args) == 3 { + if arg3, err = b.processNode(root.Args[2]); err != nil { + return nil, err + } + } + qyOutput = &functionQuery{Input: b.firstInput, Func: substringFunc(arg1, arg2, arg3)} + case "substring-before", "substring-after": + //substring-xxxx( haystack, needle ) + if len(root.Args) != 2 { + return nil, errors.New("xpath: substring-before function must have two parameters") + } + var ( + arg1, arg2 query + err error + ) + if arg1, err = b.processNode(root.Args[0]); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1]); err != nil { + return nil, err + } + qyOutput = &functionQuery{ + Input: b.firstInput, + Func: substringIndFunc(arg1, arg2, root.FuncName == "substring-after"), + } + case "string-length": + // string-length( [string] ) + if len(root.Args) < 1 { + return nil, errors.New("xpath: string-length function must have at least one parameter") + } + arg1, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Input: b.firstInput, Func: stringLengthFunc(arg1)} + case "normalize-space": + if len(root.Args) == 0 { + return nil, errors.New("xpath: normalize-space function must have at least one parameter") + } + argQuery, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc} + case "translate": + //translate( string , string, string ) + if len(root.Args) != 3 { + return nil, errors.New("xpath: translate function must have three parameters") + } + var ( + arg1, arg2, arg3 query + err error + ) + if arg1, err = b.processNode(root.Args[0]); err != nil { + return nil, err + } + if arg2, err = b.processNode(root.Args[1]); err != nil { + return nil, err + } + if arg3, err = b.processNode(root.Args[2]); err != nil { + return nil, err + } + qyOutput = &functionQuery{Input: b.firstInput, Func: translateFunc(arg1, arg2, arg3)} + case "not": + if len(root.Args) == 0 { + return nil, errors.New("xpath: not function must have at least one parameter") + } + argQuery, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Input: argQuery, Func: notFunc} + case "name", "local-name", 
"namespace-uri": + inp := b.firstInput + if len(root.Args) > 1 { + return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName) + } + if len(root.Args) == 1 { + argQuery, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + inp = argQuery + } + f := &functionQuery{Input: inp} + switch root.FuncName { + case "name": + f.Func = nameFunc + case "local-name": + f.Func = localNameFunc + case "namespace-uri": + f.Func = namespaceFunc + } + qyOutput = f + case "true", "false": + val := root.FuncName == "true" + qyOutput = &functionQuery{ + Input: b.firstInput, + Func: func(_ query, _ iterator) interface{} { + return val + }, + } + case "last": + qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc} + case "position": + qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc} + case "boolean", "number", "string": + inp := b.firstInput + if len(root.Args) > 1 { + return nil, fmt.Errorf("xpath: %s function must have at most one parameter", root.FuncName) + } + if len(root.Args) == 1 { + argQuery, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + inp = argQuery + } + f := &functionQuery{Input: inp} + switch root.FuncName { + case "boolean": + f.Func = booleanFunc + case "string": + f.Func = stringFunc + case "number": + f.Func = numberFunc + } + qyOutput = f + case "count": + //if b.firstInput == nil { + // return nil, errors.New("xpath: expression must evaluate to node-set") + //} + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Input: argQuery, Func: countFunc} + case "sum": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + qyOutput = &functionQuery{Input: argQuery, Func: sumFunc} + case "ceiling", "floor", "round": + if len(root.Args) == 0 { + return nil, fmt.Errorf("xpath: ceiling(node-sets) function must with have parameters node-sets") + } + argQuery, err := b.processNode(root.Args[0]) + if err != nil { + return nil, err + } + f := &functionQuery{Input: argQuery} + switch root.FuncName { + case "ceiling": + f.Func = ceilingFunc + case "floor": + f.Func = floorFunc + case "round": + f.Func = roundFunc + } + qyOutput = f + case "concat": + if len(root.Args) < 2 { + return nil, fmt.Errorf("xpath: concat() must have at least two arguments") + } + var args []query + for _, v := range root.Args { + q, err := b.processNode(v) + if err != nil { + return nil, err + } + args = append(args, q) + } + qyOutput = &functionQuery{Input: b.firstInput, Func: concatFunc(args...)} + default: + return nil, fmt.Errorf("not yet support this function %s()", root.FuncName) + } + return qyOutput, nil +} + +func (b *builder) processOperatorNode(root *operatorNode) (query, error) { + left, err := b.processNode(root.Left) + if err != nil { + return nil, err + } + right, err := b.processNode(root.Right) + if err != nil { + return nil, err + } + var qyOutput query + switch root.Op { + case "+", "-", "div", "mod": // Numeric operator + var exprFunc func(interface{}, interface{}) interface{} + switch root.Op { + case "+": + exprFunc = plusFunc + case "-": + exprFunc = minusFunc + case "div": + exprFunc = divFunc + case "mod": + exprFunc = modFunc + } + qyOutput = 
&numericQuery{Left: left, Right: right, Do: exprFunc} + case "=", ">", ">=", "<", "<=", "!=": + var exprFunc func(iterator, interface{}, interface{}) interface{} + switch root.Op { + case "=": + exprFunc = eqFunc + case ">": + exprFunc = gtFunc + case ">=": + exprFunc = geFunc + case "<": + exprFunc = ltFunc + case "<=": + exprFunc = leFunc + case "!=": + exprFunc = neFunc + } + qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc} + case "or", "and": + isOr := false + if root.Op == "or" { + isOr = true + } + qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr} + case "|": + qyOutput = &unionQuery{Left: left, Right: right} + } + return qyOutput, nil +} + +func (b *builder) processNode(root node) (q query, err error) { + if b.depth = b.depth + 1; b.depth > 1024 { + err = errors.New("the xpath expressions is too complex") + return + } + + switch root.Type() { + case nodeConstantOperand: + n := root.(*operandNode) + q = &constantQuery{Val: n.Val} + case nodeRoot: + q = &contextQuery{Root: true} + case nodeAxis: + q, err = b.processAxisNode(root.(*axisNode)) + b.firstInput = q + case nodeFilter: + q, err = b.processFilterNode(root.(*filterNode)) + case nodeFunction: + q, err = b.processFunctionNode(root.(*functionNode)) + case nodeOperator: + q, err = b.processOperatorNode(root.(*operatorNode)) + } + return +} + +// build builds a specified XPath expressions expr. +func build(expr string) (q query, err error) { + defer func() { + if e := recover(); e != nil { + switch x := e.(type) { + case string: + err = errors.New(x) + case error: + err = x + default: + err = errors.New("unknown panic") + } + } + }() + root := parse(expr) + b := &builder{} + return b.processNode(root) +} diff --git a/vendor/github.com/antchfx/xpath/func.go b/vendor/github.com/antchfx/xpath/func.go new file mode 100644 index 0000000..8fff039 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func.go @@ -0,0 +1,469 @@ +package xpath + +import ( + "errors" + "fmt" + "math" + "strconv" + "strings" +) + +// The XPath function list. + +func predicate(q query) func(NodeNavigator) bool { + type Predicater interface { + Test(NodeNavigator) bool + } + if p, ok := q.(Predicater); ok { + return p.Test + } + return func(NodeNavigator) bool { return true } +} + +// positionFunc is a XPath Node Set functions position(). +func positionFunc(q query, t iterator) interface{} { + var ( + count = 1 + node = t.Current() + ) + test := predicate(q) + for node.MoveToPrevious() { + if test(node) { + count++ + } + } + return float64(count) +} + +// lastFunc is a XPath Node Set functions last(). +func lastFunc(q query, t iterator) interface{} { + var ( + count = 0 + node = t.Current() + ) + node.MoveToFirst() + test := predicate(q) + for { + if test(node) { + count++ + } + if !node.MoveToNext() { + break + } + } + return float64(count) +} + +// countFunc is a XPath Node Set functions count(node-set). +func countFunc(q query, t iterator) interface{} { + var count = 0 + test := predicate(q) + switch typ := q.Evaluate(t).(type) { + case query: + for node := typ.Select(t); node != nil; node = typ.Select(t) { + if test(node) { + count++ + } + } + } + return float64(count) +} + +// sumFunc is a XPath Node Set functions sum(node-set). 
+func sumFunc(q query, t iterator) interface{} { + var sum float64 + switch typ := q.Evaluate(t).(type) { + case query: + for node := typ.Select(t); node != nil; node = typ.Select(t) { + if v, err := strconv.ParseFloat(node.Value(), 64); err == nil { + sum += v + } + } + case float64: + sum = typ + case string: + v, err := strconv.ParseFloat(typ, 64) + if err != nil { + panic(errors.New("sum() function argument type must be a node-set or number")) + } + sum = v + } + return sum +} + +func asNumber(t iterator, o interface{}) float64 { + switch typ := o.(type) { + case query: + node := typ.Select(t) + if node == nil { + return float64(0) + } + if v, err := strconv.ParseFloat(node.Value(), 64); err == nil { + return v + } + case float64: + return typ + case string: + v, err := strconv.ParseFloat(typ, 64) + if err != nil { + panic(errors.New("ceiling() function argument type must be a node-set or number")) + } + return v + } + return 0 +} + +// ceilingFunc is a XPath Node Set functions ceiling(node-set). +func ceilingFunc(q query, t iterator) interface{} { + val := asNumber(t, q.Evaluate(t)) + return math.Ceil(val) +} + +// floorFunc is a XPath Node Set functions floor(node-set). +func floorFunc(q query, t iterator) interface{} { + val := asNumber(t, q.Evaluate(t)) + return math.Floor(val) +} + +// roundFunc is a XPath Node Set functions round(node-set). +func roundFunc(q query, t iterator) interface{} { + val := asNumber(t, q.Evaluate(t)) + //return math.Round(val) + return round(val) +} + +// nameFunc is a XPath functions name([node-set]). +func nameFunc(q query, t iterator) interface{} { + v := q.Select(t) + if v == nil { + return "" + } + ns := v.Prefix() + if ns == "" { + return v.LocalName() + } + return ns + ":" + v.LocalName() +} + +// localNameFunc is a XPath functions local-name([node-set]). +func localNameFunc(q query, t iterator) interface{} { + v := q.Select(t) + if v == nil { + return "" + } + return v.LocalName() +} + +// namespaceFunc is a XPath functions namespace-uri([node-set]). +func namespaceFunc(q query, t iterator) interface{} { + v := q.Select(t) + if v == nil { + return "" + } + return v.Prefix() +} + +func asBool(t iterator, v interface{}) bool { + switch v := v.(type) { + case nil: + return false + case *NodeIterator: + return v.MoveNext() + case bool: + return bool(v) + case float64: + return v != 0 + case string: + return v != "" + case query: + return v.Select(t) != nil + default: + panic(fmt.Errorf("unexpected type: %T", v)) + } +} + +func asString(t iterator, v interface{}) string { + switch v := v.(type) { + case nil: + return "" + case bool: + if v { + return "true" + } + return "false" + case float64: + return strconv.FormatFloat(v, 'g', -1, 64) + case string: + return v + default: + panic(fmt.Errorf("unexpected type: %T", v)) + } +} + +// booleanFunc is a XPath functions boolean([node-set]). +func booleanFunc(q query, t iterator) interface{} { + v := q.Evaluate(t) + return asBool(t, v) +} + +// numberFunc is a XPath functions number([node-set]). +func numberFunc(q query, t iterator) interface{} { + v := q.Evaluate(t) + return asNumber(t, v) +} + +// stringFunc is a XPath functions string([node-set]). +func stringFunc(q query, t iterator) interface{} { + v := q.Evaluate(t) + return asString(t, v) +} + +// startwithFunc is a XPath functions starts-with(string, string). 
+func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + switch typ := arg1.Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("starts-with() function argument type must be string")) + } + n, ok = arg2.Evaluate(t).(string) + if !ok { + panic(errors.New("starts-with() function argument type must be string")) + } + return strings.HasPrefix(m, n) + } +} + +// endwithFunc is a XPath functions ends-with(string, string). +func endwithFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + switch typ := arg1.Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("ends-with() function argument type must be string")) + } + n, ok = arg2.Evaluate(t).(string) + if !ok { + panic(errors.New("ends-with() function argument type must be string")) + } + return strings.HasSuffix(m, n) + } +} + +// containsFunc is a XPath functions contains(string or @attr, string). +func containsFunc(arg1, arg2 query) func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var ( + m, n string + ok bool + ) + + switch typ := arg1.Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + default: + panic(errors.New("contains() function argument type must be string")) + } + + n, ok = arg2.Evaluate(t).(string) + if !ok { + panic(errors.New("contains() function argument type must be string")) + } + + return strings.Contains(m, n) + } +} + +// normalizespaceFunc is XPath functions normalize-space(string?) +func normalizespaceFunc(q query, t iterator) interface{} { + var m string + switch typ := q.Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return false + } + m = node.Value() + } + return strings.TrimSpace(m) +} + +// substringFunc is XPath functions substring function returns a part of a given string. +func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var m string + switch typ := arg1.Evaluate(t).(type) { + case string: + m = typ + case query: + node := typ.Select(t) + if node == nil { + return "" + } + m = node.Value() + } + + var start, length float64 + var ok bool + + if start, ok = arg2.Evaluate(t).(float64); !ok { + panic(errors.New("substring() function first argument type must be int")) + } else if start < 1 { + panic(errors.New("substring() function first argument type must be >= 1")) + } + start-- + if arg3 != nil { + if length, ok = arg3.Evaluate(t).(float64); !ok { + panic(errors.New("substring() function second argument type must be int")) + } + } + if (len(m) - int(start)) < int(length) { + panic(errors.New("substring() function start and length argument out of range")) + } + if length > 0 { + return m[int(start):int(length+start)] + } + return m[int(start):] + } +} + +// substringIndFunc is XPath functions substring-before/substring-after function returns a part of a given string. 
+func substringIndFunc(arg1, arg2 query, after bool) func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var str string + switch v := arg1.Evaluate(t).(type) { + case string: + str = v + case query: + node := v.Select(t) + if node == nil { + return "" + } + str = node.Value() + } + var word string + switch v := arg2.Evaluate(t).(type) { + case string: + word = v + case query: + node := v.Select(t) + if node == nil { + return "" + } + word = node.Value() + } + if word == "" { + return "" + } + + i := strings.Index(str, word) + if i < 0 { + return "" + } + if after { + return str[i+len(word):] + } + return str[:i] + } +} + +// stringLengthFunc is XPATH string-length( [string] ) function that returns a number +// equal to the number of characters in a given string. +func stringLengthFunc(arg1 query) func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + switch v := arg1.Evaluate(t).(type) { + case string: + return float64(len(v)) + case query: + node := v.Select(t) + if node == nil { + break + } + return float64(len(node.Value())) + } + return float64(0) + } +} + +// translateFunc is XPath functions translate() function returns a replaced string. +func translateFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + str := asString(t, arg1.Evaluate(t)) + src := asString(t, arg2.Evaluate(t)) + dst := asString(t, arg3.Evaluate(t)) + + var replace []string + for i, s := range src { + d := "" + if i < len(dst) { + d = string(dst[i]) + } + replace = append(replace, string(s), d) + } + return strings.NewReplacer(replace...).Replace(str) + } +} + +// notFunc is XPATH functions not(expression) function operation. +func notFunc(q query, t iterator) interface{} { + switch v := q.Evaluate(t).(type) { + case bool: + return !v + case query: + node := v.Select(t) + return node == nil + default: + return false + } +} + +// concatFunc is the concat function concatenates two or more +// strings and returns the resulting string. +// concat( string1 , string2 [, stringn]* ) +func concatFunc(args ...query) func(query, iterator) interface{} { + return func(q query, t iterator) interface{} { + var a []string + for _, v := range args { + switch v := v.Evaluate(t).(type) { + case string: + a = append(a, v) + case query: + node := v.Select(t) + if node != nil { + a = append(a, node.Value()) + } + } + } + return strings.Join(a, "") + } +} diff --git a/vendor/github.com/antchfx/xpath/func_go110.go b/vendor/github.com/antchfx/xpath/func_go110.go new file mode 100644 index 0000000..500880f --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func_go110.go @@ -0,0 +1,9 @@ +// +build go1.10 + +package xpath + +import "math" + +func round(f float64) int { + return int(math.Round(f)) +} diff --git a/vendor/github.com/antchfx/xpath/func_pre_go110.go b/vendor/github.com/antchfx/xpath/func_pre_go110.go new file mode 100644 index 0000000..043616b --- /dev/null +++ b/vendor/github.com/antchfx/xpath/func_pre_go110.go @@ -0,0 +1,15 @@ +// +build !go1.10 + +package xpath + +import "math" + +// math.Round() is supported by Go 1.10+, +// This method just compatible for version <1.10. 
+// https://github.com/golang/go/issues/20100 +func round(f float64) int { + if math.Abs(f) < 0.5 { + return 0 + } + return int(f + math.Copysign(0.5, f)) +} diff --git a/vendor/github.com/antchfx/xpath/operator.go b/vendor/github.com/antchfx/xpath/operator.go new file mode 100644 index 0000000..308d3cb --- /dev/null +++ b/vendor/github.com/antchfx/xpath/operator.go @@ -0,0 +1,295 @@ +package xpath + +import ( + "fmt" + "reflect" + "strconv" +) + +// The XPath number operator function list. + +// valueType is a return value type. +type valueType int + +const ( + booleanType valueType = iota + numberType + stringType + nodeSetType +) + +func getValueType(i interface{}) valueType { + v := reflect.ValueOf(i) + switch v.Kind() { + case reflect.Float64: + return numberType + case reflect.String: + return stringType + case reflect.Bool: + return booleanType + default: + if _, ok := i.(query); ok { + return nodeSetType + } + } + panic(fmt.Errorf("xpath unknown value type: %v", v.Kind())) +} + +type logical func(iterator, string, interface{}, interface{}) bool + +var logicalFuncs = [][]logical{ + {cmpBooleanBoolean, nil, nil, nil}, + {nil, cmpNumericNumeric, cmpNumericString, cmpNumericNodeSet}, + {nil, cmpStringNumeric, cmpStringString, cmpStringNodeSet}, + {nil, cmpNodeSetNumeric, cmpNodeSetString, cmpNodeSetNodeSet}, +} + +// number vs number +func cmpNumberNumberF(op string, a, b float64) bool { + switch op { + case "=": + return a == b + case ">": + return a > b + case "<": + return a < b + case ">=": + return a >= b + case "<=": + return a <= b + case "!=": + return a != b + } + return false +} + +// string vs string +func cmpStringStringF(op string, a, b string) bool { + switch op { + case "=": + return a == b + case ">": + return a > b + case "<": + return a < b + case ">=": + return a >= b + case "<=": + return a <= b + case "!=": + return a != b + } + return false +} + +func cmpBooleanBooleanF(op string, a, b bool) bool { + switch op { + case "or": + return a || b + case "and": + return a && b + } + return false +} + +func cmpNumericNumeric(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(float64) + return cmpNumberNumberF(op, a, b) +} + +func cmpNumericString(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(string) + num, err := strconv.ParseFloat(b, 64) + if err != nil { + panic(err) + } + return cmpNumberNumberF(op, a, num) +} + +func cmpNumericNodeSet(t iterator, op string, m, n interface{}) bool { + a := m.(float64) + b := n.(query) + + for { + node := b.Select(t) + if node == nil { + break + } + num, err := strconv.ParseFloat(node.Value(), 64) + if err != nil { + panic(err) + } + if cmpNumberNumberF(op, a, num) { + return true + } + } + return false +} + +func cmpNodeSetNumeric(t iterator, op string, m, n interface{}) bool { + a := m.(query) + b := n.(float64) + for { + node := a.Select(t) + if node == nil { + break + } + num, err := strconv.ParseFloat(node.Value(), 64) + if err != nil { + panic(err) + } + if cmpNumberNumberF(op, num, b) { + return true + } + } + return false +} + +func cmpNodeSetString(t iterator, op string, m, n interface{}) bool { + a := m.(query) + b := n.(string) + for { + node := a.Select(t) + if node == nil { + break + } + if cmpStringStringF(op, b, node.Value()) { + return true + } + } + return false +} + +func cmpNodeSetNodeSet(t iterator, op string, m, n interface{}) bool { + return false +} + +func cmpStringNumeric(t 
iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(float64) + num, err := strconv.ParseFloat(a, 64) + if err != nil { + panic(err) + } + return cmpNumberNumberF(op, b, num) +} + +func cmpStringString(t iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(string) + return cmpStringStringF(op, a, b) +} + +func cmpStringNodeSet(t iterator, op string, m, n interface{}) bool { + a := m.(string) + b := n.(query) + for { + node := b.Select(t) + if node == nil { + break + } + if cmpStringStringF(op, a, node.Value()) { + return true + } + } + return false +} + +func cmpBooleanBoolean(t iterator, op string, m, n interface{}) bool { + a := m.(bool) + b := n.(bool) + return cmpBooleanBooleanF(op, a, b) +} + +// eqFunc is an `=` operator. +func eqFunc(t iterator, m, n interface{}) interface{} { + t1 := getValueType(m) + t2 := getValueType(n) + return logicalFuncs[t1][t2](t, "=", m, n) +} + +// gtFunc is an `>` operator. +func gtFunc(t iterator, m, n interface{}) interface{} { + t1 := getValueType(m) + t2 := getValueType(n) + return logicalFuncs[t1][t2](t, ">", m, n) +} + +// geFunc is an `>=` operator. +func geFunc(t iterator, m, n interface{}) interface{} { + t1 := getValueType(m) + t2 := getValueType(n) + return logicalFuncs[t1][t2](t, ">=", m, n) +} + +// ltFunc is an `<` operator. +func ltFunc(t iterator, m, n interface{}) interface{} { + t1 := getValueType(m) + t2 := getValueType(n) + return logicalFuncs[t1][t2](t, "<", m, n) +} + +// leFunc is an `<=` operator. +func leFunc(t iterator, m, n interface{}) interface{} { + t1 := getValueType(m) + t2 := getValueType(n) + return logicalFuncs[t1][t2](t, "<=", m, n) +} + +// neFunc is an `!=` operator. +func neFunc(t iterator, m, n interface{}) interface{} { + t1 := getValueType(m) + t2 := getValueType(n) + return logicalFuncs[t1][t2](t, "!=", m, n) +} + +// orFunc is an `or` operator. +var orFunc = func(t iterator, m, n interface{}) interface{} { + t1 := getValueType(m) + t2 := getValueType(n) + return logicalFuncs[t1][t2](t, "or", m, n) +} + +func numericExpr(m, n interface{}, cb func(float64, float64) float64) float64 { + typ := reflect.TypeOf(float64(0)) + a := reflect.ValueOf(m).Convert(typ) + b := reflect.ValueOf(n).Convert(typ) + return cb(a.Float(), b.Float()) +} + +// plusFunc is an `+` operator. +var plusFunc = func(m, n interface{}) interface{} { + return numericExpr(m, n, func(a, b float64) float64 { + return a + b + }) +} + +// minusFunc is an `-` operator. +var minusFunc = func(m, n interface{}) interface{} { + return numericExpr(m, n, func(a, b float64) float64 { + return a - b + }) +} + +// mulFunc is an `*` operator. +var mulFunc = func(m, n interface{}) interface{} { + return numericExpr(m, n, func(a, b float64) float64 { + return a * b + }) +} + +// divFunc is an `DIV` operator. +var divFunc = func(m, n interface{}) interface{} { + return numericExpr(m, n, func(a, b float64) float64 { + return a / b + }) +} + +// modFunc is an 'MOD' operator. +var modFunc = func(m, n interface{}) interface{} { + return numericExpr(m, n, func(a, b float64) float64 { + return float64(int(a) % int(b)) + }) +} diff --git a/vendor/github.com/antchfx/xpath/parse.go b/vendor/github.com/antchfx/xpath/parse.go new file mode 100644 index 0000000..6103131 --- /dev/null +++ b/vendor/github.com/antchfx/xpath/parse.go @@ -0,0 +1,1164 @@ +package xpath + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "unicode" +) + +// A XPath expression token type. 
+type itemType int + +const ( + itemComma itemType = iota // ',' + itemSlash // '/' + itemAt // '@' + itemDot // '.' + itemLParens // '(' + itemRParens // ')' + itemLBracket // '[' + itemRBracket // ']' + itemStar // '*' + itemPlus // '+' + itemMinus // '-' + itemEq // '=' + itemLt // '<' + itemGt // '>' + itemBang // '!' + itemDollar // '$' + itemApos // '\'' + itemQuote // '"' + itemUnion // '|' + itemNe // '!=' + itemLe // '<=' + itemGe // '>=' + itemAnd // '&&' + itemOr // '||' + itemDotDot // '..' + itemSlashSlash // '//' + itemName // XML Name + itemString // Quoted string constant + itemNumber // Number constant + itemAxe // Axe (like child::) + itemEof // END +) + +// A node is an XPath node in the parse tree. +type node interface { + Type() nodeType +} + +// nodeType identifies the type of a parse tree node. +type nodeType int + +func (t nodeType) Type() nodeType { + return t +} + +const ( + nodeRoot nodeType = iota + nodeAxis + nodeFilter + nodeFunction + nodeOperator + nodeVariable + nodeConstantOperand +) + +type parser struct { + r *scanner + d int +} + +// newOperatorNode returns new operator node OperatorNode. +func newOperatorNode(op string, left, right node) node { + return &operatorNode{nodeType: nodeOperator, Op: op, Left: left, Right: right} +} + +// newOperand returns new constant operand node OperandNode. +func newOperandNode(v interface{}) node { + return &operandNode{nodeType: nodeConstantOperand, Val: v} +} + +// newAxisNode returns new axis node AxisNode. +func newAxisNode(axeTyp, localName, prefix, prop string, n node) node { + return &axisNode{ + nodeType: nodeAxis, + LocalName: localName, + Prefix: prefix, + AxeType: axeTyp, + Prop: prop, + Input: n, + } +} + +// newVariableNode returns new variable node VariableNode. +func newVariableNode(prefix, name string) node { + return &variableNode{nodeType: nodeVariable, Name: name, Prefix: prefix} +} + +// newFilterNode returns a new filter node FilterNode. +func newFilterNode(n, m node) node { + return &filterNode{nodeType: nodeFilter, Input: n, Condition: m} +} + +// newRootNode returns a root node. +func newRootNode(s string) node { + return &rootNode{nodeType: nodeRoot, slash: s} +} + +// newFunctionNode returns function call node. +func newFunctionNode(name, prefix string, args []node) node { + return &functionNode{nodeType: nodeFunction, Prefix: prefix, FuncName: name, Args: args} +} + +// testOp reports whether current item name is an operand op. +func testOp(r *scanner, op string) bool { + return r.typ == itemName && r.prefix == "" && r.name == op +} + +func isPrimaryExpr(r *scanner) bool { + switch r.typ { + case itemString, itemNumber, itemDollar, itemLParens: + return true + case itemName: + return r.canBeFunc && !isNodeType(r) + } + return false +} + +func isNodeType(r *scanner) bool { + switch r.name { + case "node", "text", "processing-instruction", "comment": + return r.prefix == "" + } + return false +} + +func isStep(item itemType) bool { + switch item { + case itemDot, itemDotDot, itemAt, itemAxe, itemStar, itemName: + return true + } + return false +} + +func checkItem(r *scanner, typ itemType) { + if r.typ != typ { + panic(fmt.Sprintf("%s has an invalid token", r.text)) + } +} + +// parseExpression parsing the expression with input node n. +func (p *parser) parseExpression(n node) node { + if p.d = p.d + 1; p.d > 200 { + panic("the xpath query is too complex(depth > 200)") + } + n = p.parseOrExpr(n) + p.d-- + return n +} + +// next scanning next item on forward. 
+func (p *parser) next() bool { + return p.r.nextItem() +} + +func (p *parser) skipItem(typ itemType) { + checkItem(p.r, typ) + p.next() +} + +// OrExpr ::= AndExpr | OrExpr 'or' AndExpr +func (p *parser) parseOrExpr(n node) node { + opnd := p.parseAndExpr(n) + for { + if !testOp(p.r, "or") { + break + } + p.next() + opnd = newOperatorNode("or", opnd, p.parseAndExpr(n)) + } + return opnd +} + +// AndExpr ::= EqualityExpr | AndExpr 'and' EqualityExpr +func (p *parser) parseAndExpr(n node) node { + opnd := p.parseEqualityExpr(n) + for { + if !testOp(p.r, "and") { + break + } + p.next() + opnd = newOperatorNode("and", opnd, p.parseEqualityExpr(n)) + } + return opnd +} + +// EqualityExpr ::= RelationalExpr | EqualityExpr '=' RelationalExpr | EqualityExpr '!=' RelationalExpr +func (p *parser) parseEqualityExpr(n node) node { + opnd := p.parseRelationalExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemEq: + op = "=" + case itemNe: + op = "!=" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseRelationalExpr(n)) + } + return opnd +} + +// RelationalExpr ::= AdditiveExpr | RelationalExpr '<' AdditiveExpr | RelationalExpr '>' AdditiveExpr +// | RelationalExpr '<=' AdditiveExpr +// | RelationalExpr '>=' AdditiveExpr +func (p *parser) parseRelationalExpr(n node) node { + opnd := p.parseAdditiveExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemLt: + op = "<" + case itemGt: + op = ">" + case itemLe: + op = "<=" + case itemGe: + op = ">=" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseAdditiveExpr(n)) + } + return opnd +} + +// AdditiveExpr ::= MultiplicativeExpr | AdditiveExpr '+' MultiplicativeExpr | AdditiveExpr '-' MultiplicativeExpr +func (p *parser) parseAdditiveExpr(n node) node { + opnd := p.parseMultiplicativeExpr(n) +Loop: + for { + var op string + switch p.r.typ { + case itemPlus: + op = "+" + case itemMinus: + op = "-" + default: + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseMultiplicativeExpr(n)) + } + return opnd +} + +// MultiplicativeExpr ::= UnaryExpr | MultiplicativeExpr MultiplyOperator(*) UnaryExpr +// | MultiplicativeExpr 'div' UnaryExpr | MultiplicativeExpr 'mod' UnaryExpr +func (p *parser) parseMultiplicativeExpr(n node) node { + opnd := p.parseUnaryExpr(n) +Loop: + for { + var op string + if p.r.typ == itemStar { + op = "*" + } else if testOp(p.r, "div") || testOp(p.r, "mod") { + op = p.r.name + } else { + break Loop + } + p.next() + opnd = newOperatorNode(op, opnd, p.parseUnaryExpr(n)) + } + return opnd +} + +// UnaryExpr ::= UnionExpr | '-' UnaryExpr +func (p *parser) parseUnaryExpr(n node) node { + minus := false + // ignore '-' sequence + for p.r.typ == itemMinus { + p.next() + minus = !minus + } + opnd := p.parseUnionExpr(n) + if minus { + opnd = newOperatorNode("*", opnd, newOperandNode(float64(-1))) + } + return opnd +} + +// UnionExpr ::= PathExpr | UnionExpr '|' PathExpr +func (p *parser) parseUnionExpr(n node) node { + opnd := p.parsePathExpr(n) +Loop: + for { + if p.r.typ != itemUnion { + break Loop + } + p.next() + opnd2 := p.parsePathExpr(n) + // Checking the node type that must be is node set type? 
+ opnd = newOperatorNode("|", opnd, opnd2) + } + return opnd +} + +// PathExpr ::= LocationPath | FilterExpr | FilterExpr '/' RelativeLocationPath | FilterExpr '//' RelativeLocationPath +func (p *parser) parsePathExpr(n node) node { + var opnd node + if isPrimaryExpr(p.r) { + opnd = p.parseFilterExpr(n) + switch p.r.typ { + case itemSlash: + p.next() + opnd = p.parseRelativeLocationPath(opnd) + case itemSlashSlash: + p.next() + opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd)) + } + } else { + opnd = p.parseLocationPath(nil) + } + return opnd +} + +// FilterExpr ::= PrimaryExpr | FilterExpr Predicate +func (p *parser) parseFilterExpr(n node) node { + opnd := p.parsePrimaryExpr(n) + if p.r.typ == itemLBracket { + opnd = newFilterNode(opnd, p.parsePredicate(opnd)) + } + return opnd +} + +// Predicate ::= '[' PredicateExpr ']' +func (p *parser) parsePredicate(n node) node { + p.skipItem(itemLBracket) + opnd := p.parseExpression(n) + p.skipItem(itemRBracket) + return opnd +} + +// LocationPath ::= RelativeLocationPath | AbsoluteLocationPath +func (p *parser) parseLocationPath(n node) (opnd node) { + switch p.r.typ { + case itemSlash: + p.next() + opnd = newRootNode("/") + if isStep(p.r.typ) { + opnd = p.parseRelativeLocationPath(opnd) // ?? child:: or self ?? + } + case itemSlashSlash: + p.next() + opnd = newRootNode("//") + opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd)) + default: + opnd = p.parseRelativeLocationPath(n) + } + return opnd +} + +// RelativeLocationPath ::= Step | RelativeLocationPath '/' Step | AbbreviatedRelativeLocationPath +func (p *parser) parseRelativeLocationPath(n node) node { + opnd := n +Loop: + for { + opnd = p.parseStep(opnd) + switch p.r.typ { + case itemSlashSlash: + p.next() + opnd = newAxisNode("descendant-or-self", "", "", "", opnd) + case itemSlash: + p.next() + default: + break Loop + } + } + return opnd +} + +// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep +func (p *parser) parseStep(n node) node { + axeTyp := "child" // default axes value. 
+ if p.r.typ == itemDot || p.r.typ == itemDotDot { + if p.r.typ == itemDot { + axeTyp = "self" + } else { + axeTyp = "parent" + } + p.next() + return newAxisNode(axeTyp, "", "", "", n) + } + switch p.r.typ { + case itemAt: + p.next() + axeTyp = "attribute" + case itemAxe: + axeTyp = p.r.name + p.next() + } + opnd := p.parseNodeTest(n, axeTyp) + for p.r.typ == itemLBracket { + opnd = newFilterNode(opnd, p.parsePredicate(opnd)) + } + return opnd +} + +// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')' +func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) { + switch p.r.typ { + case itemName: + if p.r.canBeFunc && isNodeType(p.r) { + var prop string + switch p.r.name { + case "comment", "text", "processing-instruction", "node": + prop = p.r.name + } + var name string + p.next() + p.skipItem(itemLParens) + if prop == "processing-instruction" && p.r.typ != itemRParens { + checkItem(p.r, itemString) + name = p.r.strval + p.next() + } + p.skipItem(itemRParens) + opnd = newAxisNode(axeTyp, name, "", prop, n) + } else { + prefix := p.r.prefix + name := p.r.name + p.next() + if p.r.name == "*" { + name = "" + } + opnd = newAxisNode(axeTyp, name, prefix, "", n) + } + case itemStar: + opnd = newAxisNode(axeTyp, "", "", "", n) + p.next() + default: + panic("expression must evaluate to a node-set") + } + return opnd +} + +// PrimaryExpr ::= VariableReference | '(' Expr ')' | Literal | Number | FunctionCall +func (p *parser) parsePrimaryExpr(n node) (opnd node) { + switch p.r.typ { + case itemString: + opnd = newOperandNode(p.r.strval) + p.next() + case itemNumber: + opnd = newOperandNode(p.r.numval) + p.next() + case itemDollar: + p.next() + checkItem(p.r, itemName) + opnd = newVariableNode(p.r.prefix, p.r.name) + p.next() + case itemLParens: + p.next() + opnd = p.parseExpression(n) + p.skipItem(itemRParens) + case itemName: + if p.r.canBeFunc && !isNodeType(p.r) { + opnd = p.parseMethod(nil) + } + } + return opnd +} + +// FunctionCall ::= FunctionName '(' ( Argument ( ',' Argument )* )? ')' +func (p *parser) parseMethod(n node) node { + var args []node + name := p.r.name + prefix := p.r.prefix + + p.skipItem(itemName) + p.skipItem(itemLParens) + if p.r.typ != itemRParens { + for { + args = append(args, p.parseExpression(n)) + if p.r.typ == itemRParens { + break + } + p.skipItem(itemComma) + } + } + p.skipItem(itemRParens) + return newFunctionNode(name, prefix, args) +} + +// Parse parsing the XPath express string expr and returns a tree node. +func parse(expr string) node { + r := &scanner{text: expr} + r.nextChar() + r.nextItem() + p := &parser{r: r} + return p.parseExpression(nil) +} + +// rootNode holds a top-level node of tree. +type rootNode struct { + nodeType + slash string +} + +func (r *rootNode) String() string { + return r.slash +} + +// operatorNode holds two Nodes operator. +type operatorNode struct { + nodeType + Op string + Left, Right node +} + +func (o *operatorNode) String() string { + return fmt.Sprintf("%v%s%v", o.Left, o.Op, o.Right) +} + +// axisNode holds a location step. +type axisNode struct { + nodeType + Input node + Prop string // node-test name.[comment|text|processing-instruction|node] + AxeType string // name of the axes.[attribute|ancestor|child|....] + LocalName string // local part name of node. + Prefix string // prefix name of node. 
+} + +func (a *axisNode) String() string { + var b bytes.Buffer + if a.AxeType != "" { + b.Write([]byte(a.AxeType + "::")) + } + if a.Prefix != "" { + b.Write([]byte(a.Prefix + ":")) + } + b.Write([]byte(a.LocalName)) + if a.Prop != "" { + b.Write([]byte("/" + a.Prop + "()")) + } + return b.String() +} + +// operandNode holds a constant operand. +type operandNode struct { + nodeType + Val interface{} +} + +func (o *operandNode) String() string { + return fmt.Sprintf("%v", o.Val) +} + +// filterNode holds a condition filter. +type filterNode struct { + nodeType + Input, Condition node +} + +func (f *filterNode) String() string { + return fmt.Sprintf("%s[%s]", f.Input, f.Condition) +} + +// variableNode holds a variable. +type variableNode struct { + nodeType + Name, Prefix string +} + +func (v *variableNode) String() string { + if v.Prefix == "" { + return v.Name + } + return fmt.Sprintf("%s:%s", v.Prefix, v.Name) +} + +// functionNode holds a function call. +type functionNode struct { + nodeType + Args []node + Prefix string + FuncName string // function name +} + +func (f *functionNode) String() string { + var b bytes.Buffer + // fun(arg1, ..., argn) + b.Write([]byte(f.FuncName)) + b.Write([]byte("(")) + for i, arg := range f.Args { + if i > 0 { + b.Write([]byte(",")) + } + b.Write([]byte(fmt.Sprintf("%s", arg))) + } + b.Write([]byte(")")) + return b.String() +} + +type scanner struct { + text, name, prefix string + + pos int + curr rune + typ itemType + strval string // text value at current pos + numval float64 // number value at current pos + canBeFunc bool +} + +func (s *scanner) nextChar() bool { + if s.pos >= len(s.text) { + s.curr = rune(0) + return false + } + s.curr = rune(s.text[s.pos]) + s.pos += 1 + return true +} + +func (s *scanner) nextItem() bool { + s.skipSpace() + switch s.curr { + case 0: + s.typ = itemEof + return false + case ',', '@', '(', ')', '|', '*', '[', ']', '+', '-', '=', '#', '$': + s.typ = asItemType(s.curr) + s.nextChar() + case '<': + s.typ = itemLt + s.nextChar() + if s.curr == '=' { + s.typ = itemLe + s.nextChar() + } + case '>': + s.typ = itemGt + s.nextChar() + if s.curr == '=' { + s.typ = itemGe + s.nextChar() + } + case '!': + s.typ = itemBang + s.nextChar() + if s.curr == '=' { + s.typ = itemNe + s.nextChar() + } + case '.': + s.typ = itemDot + s.nextChar() + if s.curr == '.' 
{ + s.typ = itemDotDot + s.nextChar() + } else if isDigit(s.curr) { + s.typ = itemNumber + s.numval = s.scanFraction() + } + case '/': + s.typ = itemSlash + s.nextChar() + if s.curr == '/' { + s.typ = itemSlashSlash + s.nextChar() + } + case '"', '\'': + s.typ = itemString + s.strval = s.scanString() + default: + if isDigit(s.curr) { + s.typ = itemNumber + s.numval = s.scanNumber() + } else if isName(s.curr) { + s.typ = itemName + s.name = s.scanName() + s.prefix = "" + // "foo:bar" is one itemem not three because it doesn't allow spaces in between + // We should distinct it from "foo::" and need process "foo ::" as well + if s.curr == ':' { + s.nextChar() + // can be "foo:bar" or "foo::" + if s.curr == ':' { + // "foo::" + s.nextChar() + s.typ = itemAxe + } else { // "foo:*", "foo:bar" or "foo: " + s.prefix = s.name + if s.curr == '*' { + s.nextChar() + s.name = "*" + } else if isName(s.curr) { + s.name = s.scanName() + } else { + panic(fmt.Sprintf("%s has an invalid qualified name.", s.text)) + } + } + } else { + s.skipSpace() + if s.curr == ':' { + s.nextChar() + // it can be "foo ::" or just "foo :" + if s.curr == ':' { + s.nextChar() + s.typ = itemAxe + } else { + panic(fmt.Sprintf("%s has an invalid qualified name.", s.text)) + } + } + } + s.skipSpace() + s.canBeFunc = s.curr == '(' + } else { + panic(fmt.Sprintf("%s has an invalid token.", s.text)) + } + } + return true +} + +func (s *scanner) skipSpace() { +Loop: + for { + if !unicode.IsSpace(s.curr) || !s.nextChar() { + break Loop + } + } +} + +func (s *scanner) scanFraction() float64 { + var ( + i = s.pos - 2 + c = 1 // '.' + ) + for isDigit(s.curr) { + s.nextChar() + c++ + } + v, err := strconv.ParseFloat(s.text[i:i+c], 64) + if err != nil { + panic(fmt.Errorf("xpath: scanFraction parse float got error: %v", err)) + } + return v +} + +func (s *scanner) scanNumber() float64 { + var ( + c int + i = s.pos - 1 + ) + for isDigit(s.curr) { + s.nextChar() + c++ + } + if s.curr == '.' 
{ + s.nextChar() + c++ + for isDigit(s.curr) { + s.nextChar() + c++ + } + } + v, err := strconv.ParseFloat(s.text[i:i+c], 64) + if err != nil { + panic(fmt.Errorf("xpath: scanNumber parse float got error: %v", err)) + } + return v +} + +func (s *scanner) scanString() string { + var ( + c = 0 + end = s.curr + ) + s.nextChar() + i := s.pos - 1 + for s.curr != end { + if !s.nextChar() { + panic(errors.New("xpath: scanString got unclosed string")) + } + c++ + } + s.nextChar() + return s.text[i : i+c] +} + +func (s *scanner) scanName() string { + var ( + c int + i = s.pos - 1 + ) + for isName(s.curr) { + c++ + if !s.nextChar() { + break + } + } + return s.text[i : i+c] +} + +func isName(r rune) bool { + return string(r) != ":" && string(r) != "/" && + (unicode.Is(first, r) || unicode.Is(second, r) || string(r) == "*") +} + +func isDigit(r rune) bool { + return unicode.IsDigit(r) +} + +func asItemType(r rune) itemType { + switch r { + case ',': + return itemComma + case '@': + return itemAt + case '(': + return itemLParens + case ')': + return itemRParens + case '|': + return itemUnion + case '*': + return itemStar + case '[': + return itemLBracket + case ']': + return itemRBracket + case '+': + return itemPlus + case '-': + return itemMinus + case '=': + return itemEq + case '$': + return itemDollar + } + panic(fmt.Errorf("unknown item: %v", r)) +} + +var first = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x003A, 0x003A, 1}, + {0x0041, 0x005A, 1}, + {0x005F, 0x005F, 1}, + {0x0061, 0x007A, 1}, + {0x00C0, 0x00D6, 1}, + {0x00D8, 0x00F6, 1}, + {0x00F8, 0x00FF, 1}, + {0x0100, 0x0131, 1}, + {0x0134, 0x013E, 1}, + {0x0141, 0x0148, 1}, + {0x014A, 0x017E, 1}, + {0x0180, 0x01C3, 1}, + {0x01CD, 0x01F0, 1}, + {0x01F4, 0x01F5, 1}, + {0x01FA, 0x0217, 1}, + {0x0250, 0x02A8, 1}, + {0x02BB, 0x02C1, 1}, + {0x0386, 0x0386, 1}, + {0x0388, 0x038A, 1}, + {0x038C, 0x038C, 1}, + {0x038E, 0x03A1, 1}, + {0x03A3, 0x03CE, 1}, + {0x03D0, 0x03D6, 1}, + {0x03DA, 0x03E0, 2}, + {0x03E2, 0x03F3, 1}, + {0x0401, 0x040C, 1}, + {0x040E, 0x044F, 1}, + {0x0451, 0x045C, 1}, + {0x045E, 0x0481, 1}, + {0x0490, 0x04C4, 1}, + {0x04C7, 0x04C8, 1}, + {0x04CB, 0x04CC, 1}, + {0x04D0, 0x04EB, 1}, + {0x04EE, 0x04F5, 1}, + {0x04F8, 0x04F9, 1}, + {0x0531, 0x0556, 1}, + {0x0559, 0x0559, 1}, + {0x0561, 0x0586, 1}, + {0x05D0, 0x05EA, 1}, + {0x05F0, 0x05F2, 1}, + {0x0621, 0x063A, 1}, + {0x0641, 0x064A, 1}, + {0x0671, 0x06B7, 1}, + {0x06BA, 0x06BE, 1}, + {0x06C0, 0x06CE, 1}, + {0x06D0, 0x06D3, 1}, + {0x06D5, 0x06D5, 1}, + {0x06E5, 0x06E6, 1}, + {0x0905, 0x0939, 1}, + {0x093D, 0x093D, 1}, + {0x0958, 0x0961, 1}, + {0x0985, 0x098C, 1}, + {0x098F, 0x0990, 1}, + {0x0993, 0x09A8, 1}, + {0x09AA, 0x09B0, 1}, + {0x09B2, 0x09B2, 1}, + {0x09B6, 0x09B9, 1}, + {0x09DC, 0x09DD, 1}, + {0x09DF, 0x09E1, 1}, + {0x09F0, 0x09F1, 1}, + {0x0A05, 0x0A0A, 1}, + {0x0A0F, 0x0A10, 1}, + {0x0A13, 0x0A28, 1}, + {0x0A2A, 0x0A30, 1}, + {0x0A32, 0x0A33, 1}, + {0x0A35, 0x0A36, 1}, + {0x0A38, 0x0A39, 1}, + {0x0A59, 0x0A5C, 1}, + {0x0A5E, 0x0A5E, 1}, + {0x0A72, 0x0A74, 1}, + {0x0A85, 0x0A8B, 1}, + {0x0A8D, 0x0A8D, 1}, + {0x0A8F, 0x0A91, 1}, + {0x0A93, 0x0AA8, 1}, + {0x0AAA, 0x0AB0, 1}, + {0x0AB2, 0x0AB3, 1}, + {0x0AB5, 0x0AB9, 1}, + {0x0ABD, 0x0AE0, 0x23}, + {0x0B05, 0x0B0C, 1}, + {0x0B0F, 0x0B10, 1}, + {0x0B13, 0x0B28, 1}, + {0x0B2A, 0x0B30, 1}, + {0x0B32, 0x0B33, 1}, + {0x0B36, 0x0B39, 1}, + {0x0B3D, 0x0B3D, 1}, + {0x0B5C, 0x0B5D, 1}, + {0x0B5F, 0x0B61, 1}, + {0x0B85, 0x0B8A, 1}, + {0x0B8E, 0x0B90, 1}, + {0x0B92, 0x0B95, 1}, + {0x0B99, 0x0B9A, 1}, + {0x0B9C, 0x0B9C, 1}, + 
{0x0B9E, 0x0B9F, 1}, + {0x0BA3, 0x0BA4, 1}, + {0x0BA8, 0x0BAA, 1}, + {0x0BAE, 0x0BB5, 1}, + {0x0BB7, 0x0BB9, 1}, + {0x0C05, 0x0C0C, 1}, + {0x0C0E, 0x0C10, 1}, + {0x0C12, 0x0C28, 1}, + {0x0C2A, 0x0C33, 1}, + {0x0C35, 0x0C39, 1}, + {0x0C60, 0x0C61, 1}, + {0x0C85, 0x0C8C, 1}, + {0x0C8E, 0x0C90, 1}, + {0x0C92, 0x0CA8, 1}, + {0x0CAA, 0x0CB3, 1}, + {0x0CB5, 0x0CB9, 1}, + {0x0CDE, 0x0CDE, 1}, + {0x0CE0, 0x0CE1, 1}, + {0x0D05, 0x0D0C, 1}, + {0x0D0E, 0x0D10, 1}, + {0x0D12, 0x0D28, 1}, + {0x0D2A, 0x0D39, 1}, + {0x0D60, 0x0D61, 1}, + {0x0E01, 0x0E2E, 1}, + {0x0E30, 0x0E30, 1}, + {0x0E32, 0x0E33, 1}, + {0x0E40, 0x0E45, 1}, + {0x0E81, 0x0E82, 1}, + {0x0E84, 0x0E84, 1}, + {0x0E87, 0x0E88, 1}, + {0x0E8A, 0x0E8D, 3}, + {0x0E94, 0x0E97, 1}, + {0x0E99, 0x0E9F, 1}, + {0x0EA1, 0x0EA3, 1}, + {0x0EA5, 0x0EA7, 2}, + {0x0EAA, 0x0EAB, 1}, + {0x0EAD, 0x0EAE, 1}, + {0x0EB0, 0x0EB0, 1}, + {0x0EB2, 0x0EB3, 1}, + {0x0EBD, 0x0EBD, 1}, + {0x0EC0, 0x0EC4, 1}, + {0x0F40, 0x0F47, 1}, + {0x0F49, 0x0F69, 1}, + {0x10A0, 0x10C5, 1}, + {0x10D0, 0x10F6, 1}, + {0x1100, 0x1100, 1}, + {0x1102, 0x1103, 1}, + {0x1105, 0x1107, 1}, + {0x1109, 0x1109, 1}, + {0x110B, 0x110C, 1}, + {0x110E, 0x1112, 1}, + {0x113C, 0x1140, 2}, + {0x114C, 0x1150, 2}, + {0x1154, 0x1155, 1}, + {0x1159, 0x1159, 1}, + {0x115F, 0x1161, 1}, + {0x1163, 0x1169, 2}, + {0x116D, 0x116E, 1}, + {0x1172, 0x1173, 1}, + {0x1175, 0x119E, 0x119E - 0x1175}, + {0x11A8, 0x11AB, 0x11AB - 0x11A8}, + {0x11AE, 0x11AF, 1}, + {0x11B7, 0x11B8, 1}, + {0x11BA, 0x11BA, 1}, + {0x11BC, 0x11C2, 1}, + {0x11EB, 0x11F0, 0x11F0 - 0x11EB}, + {0x11F9, 0x11F9, 1}, + {0x1E00, 0x1E9B, 1}, + {0x1EA0, 0x1EF9, 1}, + {0x1F00, 0x1F15, 1}, + {0x1F18, 0x1F1D, 1}, + {0x1F20, 0x1F45, 1}, + {0x1F48, 0x1F4D, 1}, + {0x1F50, 0x1F57, 1}, + {0x1F59, 0x1F5B, 0x1F5B - 0x1F59}, + {0x1F5D, 0x1F5D, 1}, + {0x1F5F, 0x1F7D, 1}, + {0x1F80, 0x1FB4, 1}, + {0x1FB6, 0x1FBC, 1}, + {0x1FBE, 0x1FBE, 1}, + {0x1FC2, 0x1FC4, 1}, + {0x1FC6, 0x1FCC, 1}, + {0x1FD0, 0x1FD3, 1}, + {0x1FD6, 0x1FDB, 1}, + {0x1FE0, 0x1FEC, 1}, + {0x1FF2, 0x1FF4, 1}, + {0x1FF6, 0x1FFC, 1}, + {0x2126, 0x2126, 1}, + {0x212A, 0x212B, 1}, + {0x212E, 0x212E, 1}, + {0x2180, 0x2182, 1}, + {0x3007, 0x3007, 1}, + {0x3021, 0x3029, 1}, + {0x3041, 0x3094, 1}, + {0x30A1, 0x30FA, 1}, + {0x3105, 0x312C, 1}, + {0x4E00, 0x9FA5, 1}, + {0xAC00, 0xD7A3, 1}, + }, +} + +var second = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x002D, 0x002E, 1}, + {0x0030, 0x0039, 1}, + {0x00B7, 0x00B7, 1}, + {0x02D0, 0x02D1, 1}, + {0x0300, 0x0345, 1}, + {0x0360, 0x0361, 1}, + {0x0387, 0x0387, 1}, + {0x0483, 0x0486, 1}, + {0x0591, 0x05A1, 1}, + {0x05A3, 0x05B9, 1}, + {0x05BB, 0x05BD, 1}, + {0x05BF, 0x05BF, 1}, + {0x05C1, 0x05C2, 1}, + {0x05C4, 0x0640, 0x0640 - 0x05C4}, + {0x064B, 0x0652, 1}, + {0x0660, 0x0669, 1}, + {0x0670, 0x0670, 1}, + {0x06D6, 0x06DC, 1}, + {0x06DD, 0x06DF, 1}, + {0x06E0, 0x06E4, 1}, + {0x06E7, 0x06E8, 1}, + {0x06EA, 0x06ED, 1}, + {0x06F0, 0x06F9, 1}, + {0x0901, 0x0903, 1}, + {0x093C, 0x093C, 1}, + {0x093E, 0x094C, 1}, + {0x094D, 0x094D, 1}, + {0x0951, 0x0954, 1}, + {0x0962, 0x0963, 1}, + {0x0966, 0x096F, 1}, + {0x0981, 0x0983, 1}, + {0x09BC, 0x09BC, 1}, + {0x09BE, 0x09BF, 1}, + {0x09C0, 0x09C4, 1}, + {0x09C7, 0x09C8, 1}, + {0x09CB, 0x09CD, 1}, + {0x09D7, 0x09D7, 1}, + {0x09E2, 0x09E3, 1}, + {0x09E6, 0x09EF, 1}, + {0x0A02, 0x0A3C, 0x3A}, + {0x0A3E, 0x0A3F, 1}, + {0x0A40, 0x0A42, 1}, + {0x0A47, 0x0A48, 1}, + {0x0A4B, 0x0A4D, 1}, + {0x0A66, 0x0A6F, 1}, + {0x0A70, 0x0A71, 1}, + {0x0A81, 0x0A83, 1}, + {0x0ABC, 0x0ABC, 1}, + {0x0ABE, 0x0AC5, 1}, + {0x0AC7, 0x0AC9, 1}, + 
{0x0ACB, 0x0ACD, 1}, + {0x0AE6, 0x0AEF, 1}, + {0x0B01, 0x0B03, 1}, + {0x0B3C, 0x0B3C, 1}, + {0x0B3E, 0x0B43, 1}, + {0x0B47, 0x0B48, 1}, + {0x0B4B, 0x0B4D, 1}, + {0x0B56, 0x0B57, 1}, + {0x0B66, 0x0B6F, 1}, + {0x0B82, 0x0B83, 1}, + {0x0BBE, 0x0BC2, 1}, + {0x0BC6, 0x0BC8, 1}, + {0x0BCA, 0x0BCD, 1}, + {0x0BD7, 0x0BD7, 1}, + {0x0BE7, 0x0BEF, 1}, + {0x0C01, 0x0C03, 1}, + {0x0C3E, 0x0C44, 1}, + {0x0C46, 0x0C48, 1}, + {0x0C4A, 0x0C4D, 1}, + {0x0C55, 0x0C56, 1}, + {0x0C66, 0x0C6F, 1}, + {0x0C82, 0x0C83, 1}, + {0x0CBE, 0x0CC4, 1}, + {0x0CC6, 0x0CC8, 1}, + {0x0CCA, 0x0CCD, 1}, + {0x0CD5, 0x0CD6, 1}, + {0x0CE6, 0x0CEF, 1}, + {0x0D02, 0x0D03, 1}, + {0x0D3E, 0x0D43, 1}, + {0x0D46, 0x0D48, 1}, + {0x0D4A, 0x0D4D, 1}, + {0x0D57, 0x0D57, 1}, + {0x0D66, 0x0D6F, 1}, + {0x0E31, 0x0E31, 1}, + {0x0E34, 0x0E3A, 1}, + {0x0E46, 0x0E46, 1}, + {0x0E47, 0x0E4E, 1}, + {0x0E50, 0x0E59, 1}, + {0x0EB1, 0x0EB1, 1}, + {0x0EB4, 0x0EB9, 1}, + {0x0EBB, 0x0EBC, 1}, + {0x0EC6, 0x0EC6, 1}, + {0x0EC8, 0x0ECD, 1}, + {0x0ED0, 0x0ED9, 1}, + {0x0F18, 0x0F19, 1}, + {0x0F20, 0x0F29, 1}, + {0x0F35, 0x0F39, 2}, + {0x0F3E, 0x0F3F, 1}, + {0x0F71, 0x0F84, 1}, + {0x0F86, 0x0F8B, 1}, + {0x0F90, 0x0F95, 1}, + {0x0F97, 0x0F97, 1}, + {0x0F99, 0x0FAD, 1}, + {0x0FB1, 0x0FB7, 1}, + {0x0FB9, 0x0FB9, 1}, + {0x20D0, 0x20DC, 1}, + {0x20E1, 0x3005, 0x3005 - 0x20E1}, + {0x302A, 0x302F, 1}, + {0x3031, 0x3035, 1}, + {0x3099, 0x309A, 1}, + {0x309D, 0x309E, 1}, + {0x30FC, 0x30FE, 1}, + }, +} diff --git a/vendor/github.com/antchfx/xpath/query.go b/vendor/github.com/antchfx/xpath/query.go new file mode 100644 index 0000000..3bb5b5c --- /dev/null +++ b/vendor/github.com/antchfx/xpath/query.go @@ -0,0 +1,791 @@ +package xpath + +import ( + "reflect" +) + +type iterator interface { + Current() NodeNavigator +} + +// An XPath query interface. +type query interface { + // Select traversing iterator returns a query matched node NodeNavigator. + Select(iterator) NodeNavigator + + // Evaluate evaluates query and returns values of the current query. + Evaluate(iterator) interface{} + + Clone() query +} + +// contextQuery is returns current node on the iterator object query. +type contextQuery struct { + count int + Root bool // Moving to root-level node in the current context iterator. 
+} + +func (c *contextQuery) Select(t iterator) (n NodeNavigator) { + if c.count == 0 { + c.count++ + n = t.Current().Copy() + if c.Root { + n.MoveToRoot() + } + } + return n +} + +func (c *contextQuery) Evaluate(iterator) interface{} { + c.count = 0 + return c +} + +func (c *contextQuery) Clone() query { + return &contextQuery{count: 0, Root: c.Root} +} + +// ancestorQuery is an XPath ancestor node query.(ancestor::*|ancestor-self::*) +type ancestorQuery struct { + iterator func() NodeNavigator + + Self bool + Input query + Predicate func(NodeNavigator) bool +} + +func (a *ancestorQuery) Select(t iterator) NodeNavigator { + for { + if a.iterator == nil { + node := a.Input.Select(t) + if node == nil { + return nil + } + first := true + a.iterator = func() NodeNavigator { + if first && a.Self { + first = false + if a.Predicate(node) { + return node + } + } + for node.MoveToParent() { + if !a.Predicate(node) { + break + } + return node + } + return nil + } + } + + if node := a.iterator(); node != nil { + return node + } + a.iterator = nil + } +} + +func (a *ancestorQuery) Evaluate(t iterator) interface{} { + a.Input.Evaluate(t) + a.iterator = nil + return a +} + +func (a *ancestorQuery) Test(n NodeNavigator) bool { + return a.Predicate(n) +} + +func (a *ancestorQuery) Clone() query { + return &ancestorQuery{Self: a.Self, Input: a.Input.Clone(), Predicate: a.Predicate} +} + +// attributeQuery is an XPath attribute node query.(@*) +type attributeQuery struct { + iterator func() NodeNavigator + + Input query + Predicate func(NodeNavigator) bool +} + +func (a *attributeQuery) Select(t iterator) NodeNavigator { + for { + if a.iterator == nil { + node := a.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + a.iterator = func() NodeNavigator { + for { + onAttr := node.MoveToNextAttribute() + if !onAttr { + return nil + } + if a.Predicate(node) { + return node + } + } + } + } + + if node := a.iterator(); node != nil { + return node + } + a.iterator = nil + } +} + +func (a *attributeQuery) Evaluate(t iterator) interface{} { + a.Input.Evaluate(t) + a.iterator = nil + return a +} + +func (a *attributeQuery) Test(n NodeNavigator) bool { + return a.Predicate(n) +} + +func (a *attributeQuery) Clone() query { + return &attributeQuery{Input: a.Input.Clone(), Predicate: a.Predicate} +} + +// childQuery is an XPath child node query.(child::*) +type childQuery struct { + posit int + iterator func() NodeNavigator + + Input query + Predicate func(NodeNavigator) bool +} + +func (c *childQuery) Select(t iterator) NodeNavigator { + for { + if c.iterator == nil { + c.posit = 0 + node := c.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + first := true + c.iterator = func() NodeNavigator { + for { + if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) { + return nil + } + first = false + if c.Predicate(node) { + return node + } + } + } + } + + if node := c.iterator(); node != nil { + c.posit++ + return node + } + c.iterator = nil + } +} + +func (c *childQuery) Evaluate(t iterator) interface{} { + c.Input.Evaluate(t) + c.iterator = nil + return c +} + +func (c *childQuery) Test(n NodeNavigator) bool { + return c.Predicate(n) +} + +func (c *childQuery) Clone() query { + return &childQuery{Input: c.Input.Clone(), Predicate: c.Predicate} +} + +// position returns a position of current NodeNavigator. 
+func (c *childQuery) position() int { + return c.posit +} + +// descendantQuery is an XPath descendant node query.(descendant::* | descendant-or-self::*) +type descendantQuery struct { + iterator func() NodeNavigator + posit int + + Self bool + Input query + Predicate func(NodeNavigator) bool +} + +func (d *descendantQuery) Select(t iterator) NodeNavigator { + for { + if d.iterator == nil { + d.posit = 0 + node := d.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + level := 0 + first := true + d.iterator = func() NodeNavigator { + if first && d.Self { + first = false + if d.Predicate(node) { + return node + } + } + + for { + if node.MoveToChild() { + level++ + } else { + for { + if level == 0 { + return nil + } + if node.MoveToNext() { + break + } + node.MoveToParent() + level-- + } + } + if d.Predicate(node) { + return node + } + } + } + } + + if node := d.iterator(); node != nil { + d.posit++ + return node + } + d.iterator = nil + } +} + +func (d *descendantQuery) Evaluate(t iterator) interface{} { + d.Input.Evaluate(t) + d.iterator = nil + return d +} + +func (d *descendantQuery) Test(n NodeNavigator) bool { + return d.Predicate(n) +} + +// position returns a position of current NodeNavigator. +func (d *descendantQuery) position() int { + return d.posit +} + +func (d *descendantQuery) Clone() query { + return &descendantQuery{Self: d.Self, Input: d.Input.Clone(), Predicate: d.Predicate} +} + +// followingQuery is an XPath following node query.(following::*|following-sibling::*) +type followingQuery struct { + iterator func() NodeNavigator + + Input query + Sibling bool // The matching sibling node of current node. + Predicate func(NodeNavigator) bool +} + +func (f *followingQuery) Select(t iterator) NodeNavigator { + for { + if f.iterator == nil { + node := f.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if f.Sibling { + f.iterator = func() NodeNavigator { + for { + if !node.MoveToNext() { + return nil + } + if f.Predicate(node) { + return node + } + } + } + } else { + var q query // descendant query + f.iterator = func() NodeNavigator { + for { + if q == nil { + for !node.MoveToNext() { + if !node.MoveToParent() { + return nil + } + } + q = &descendantQuery{ + Self: true, + Input: &contextQuery{}, + Predicate: f.Predicate, + } + t.Current().MoveTo(node) + } + if node := q.Select(t); node != nil { + return node + } + q = nil + } + } + } + } + + if node := f.iterator(); node != nil { + return node + } + f.iterator = nil + } +} + +func (f *followingQuery) Evaluate(t iterator) interface{} { + f.Input.Evaluate(t) + return f +} + +func (f *followingQuery) Test(n NodeNavigator) bool { + return f.Predicate(n) +} + +func (f *followingQuery) Clone() query { + return &followingQuery{Input: f.Input.Clone(), Sibling: f.Sibling, Predicate: f.Predicate} +} + +// precedingQuery is an XPath preceding node query.(preceding::*) +type precedingQuery struct { + iterator func() NodeNavigator + Input query + Sibling bool // The matching sibling node of current node. 
+ Predicate func(NodeNavigator) bool +} + +func (p *precedingQuery) Select(t iterator) NodeNavigator { + for { + if p.iterator == nil { + node := p.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if p.Sibling { + p.iterator = func() NodeNavigator { + for { + for !node.MoveToPrevious() { + return nil + } + if p.Predicate(node) { + return node + } + } + } + } else { + var q query + p.iterator = func() NodeNavigator { + for { + if q == nil { + for !node.MoveToPrevious() { + if !node.MoveToParent() { + return nil + } + } + q = &descendantQuery{ + Self: true, + Input: &contextQuery{}, + Predicate: p.Predicate, + } + t.Current().MoveTo(node) + } + if node := q.Select(t); node != nil { + return node + } + q = nil + } + } + } + } + if node := p.iterator(); node != nil { + return node + } + p.iterator = nil + } +} + +func (p *precedingQuery) Evaluate(t iterator) interface{} { + p.Input.Evaluate(t) + return p +} + +func (p *precedingQuery) Test(n NodeNavigator) bool { + return p.Predicate(n) +} + +func (p *precedingQuery) Clone() query { + return &precedingQuery{Input: p.Input.Clone(), Sibling: p.Sibling, Predicate: p.Predicate} +} + +// parentQuery is an XPath parent node query.(parent::*) +type parentQuery struct { + Input query + Predicate func(NodeNavigator) bool +} + +func (p *parentQuery) Select(t iterator) NodeNavigator { + for { + node := p.Input.Select(t) + if node == nil { + return nil + } + node = node.Copy() + if node.MoveToParent() && p.Predicate(node) { + return node + } + } +} + +func (p *parentQuery) Evaluate(t iterator) interface{} { + p.Input.Evaluate(t) + return p +} + +func (p *parentQuery) Clone() query { + return &parentQuery{Input: p.Input.Clone(), Predicate: p.Predicate} +} + +func (p *parentQuery) Test(n NodeNavigator) bool { + return p.Predicate(n) +} + +// selfQuery is an Self node query.(self::*) +type selfQuery struct { + Input query + Predicate func(NodeNavigator) bool +} + +func (s *selfQuery) Select(t iterator) NodeNavigator { + for { + node := s.Input.Select(t) + if node == nil { + return nil + } + + if s.Predicate(node) { + return node + } + } +} + +func (s *selfQuery) Evaluate(t iterator) interface{} { + s.Input.Evaluate(t) + return s +} + +func (s *selfQuery) Test(n NodeNavigator) bool { + return s.Predicate(n) +} + +func (s *selfQuery) Clone() query { + return &selfQuery{Input: s.Input.Clone(), Predicate: s.Predicate} +} + +// filterQuery is an XPath query for predicate filter. +type filterQuery struct { + Input query + Predicate query +} + +func (f *filterQuery) do(t iterator) bool { + val := reflect.ValueOf(f.Predicate.Evaluate(t)) + switch val.Kind() { + case reflect.Bool: + return val.Bool() + case reflect.String: + return len(val.String()) > 0 + case reflect.Float64: + pt := float64(getNodePosition(f.Input)) + return int(val.Float()) == int(pt) + default: + if q, ok := f.Predicate.(query); ok { + return q.Select(t) != nil + } + } + return false +} + +func (f *filterQuery) Select(t iterator) NodeNavigator { + for { + node := f.Input.Select(t) + if node == nil { + return node + } + node = node.Copy() + //fmt.Println(node.LocalName()) + + t.Current().MoveTo(node) + if f.do(t) { + return node + } + } +} + +func (f *filterQuery) Evaluate(t iterator) interface{} { + f.Input.Evaluate(t) + return f +} + +func (f *filterQuery) Clone() query { + return &filterQuery{Input: f.Input.Clone(), Predicate: f.Predicate.Clone()} +} + +// functionQuery is an XPath function that call a function to returns +// value of current NodeNavigator node. 
+type functionQuery struct { + Input query // Node Set + Func func(query, iterator) interface{} // The xpath function. +} + +func (f *functionQuery) Select(t iterator) NodeNavigator { + return nil +} + +// Evaluate call a specified function that will returns the +// following value type: number,string,boolean. +func (f *functionQuery) Evaluate(t iterator) interface{} { + return f.Func(f.Input, t) +} + +func (f *functionQuery) Clone() query { + return &functionQuery{Input: f.Input.Clone(), Func: f.Func} +} + +// constantQuery is an XPath constant operand. +type constantQuery struct { + Val interface{} +} + +func (c *constantQuery) Select(t iterator) NodeNavigator { + return nil +} + +func (c *constantQuery) Evaluate(t iterator) interface{} { + return c.Val +} + +func (c *constantQuery) Clone() query { + return c +} + +// logicalQuery is an XPath logical expression. +type logicalQuery struct { + Left, Right query + + Do func(iterator, interface{}, interface{}) interface{} +} + +func (l *logicalQuery) Select(t iterator) NodeNavigator { + // When a XPath expr is logical expression. + node := t.Current().Copy() + val := l.Evaluate(t) + switch val.(type) { + case bool: + if val.(bool) == true { + return node + } + } + return nil +} + +func (l *logicalQuery) Evaluate(t iterator) interface{} { + m := l.Left.Evaluate(t) + n := l.Right.Evaluate(t) + return l.Do(t, m, n) +} + +func (l *logicalQuery) Clone() query { + return &logicalQuery{Left: l.Left.Clone(), Right: l.Right.Clone(), Do: l.Do} +} + +// numericQuery is an XPath numeric operator expression. +type numericQuery struct { + Left, Right query + + Do func(interface{}, interface{}) interface{} +} + +func (n *numericQuery) Select(t iterator) NodeNavigator { + return nil +} + +func (n *numericQuery) Evaluate(t iterator) interface{} { + m := n.Left.Evaluate(t) + k := n.Right.Evaluate(t) + return n.Do(m, k) +} + +func (n *numericQuery) Clone() query { + return &numericQuery{Left: n.Left.Clone(), Right: n.Right.Clone(), Do: n.Do} +} + +type booleanQuery struct { + IsOr bool + Left, Right query + iterator func() NodeNavigator +} + +func (b *booleanQuery) Select(t iterator) NodeNavigator { + if b.iterator == nil { + var list []NodeNavigator + i := 0 + root := t.Current().Copy() + if b.IsOr { + for { + node := b.Left.Select(t) + if node == nil { + break + } + node = node.Copy() + list = append(list, node) + } + t.Current().MoveTo(root) + for { + node := b.Right.Select(t) + if node == nil { + break + } + node = node.Copy() + list = append(list, node) + } + } else { + var m []NodeNavigator + var n []NodeNavigator + for { + node := b.Left.Select(t) + if node == nil { + break + } + node = node.Copy() + list = append(m, node) + } + t.Current().MoveTo(root) + for { + node := b.Right.Select(t) + if node == nil { + break + } + node = node.Copy() + list = append(n, node) + } + for _, k := range m { + for _, j := range n { + if k == j { + list = append(list, k) + } + } + } + } + + b.iterator = func() NodeNavigator { + if i >= len(list) { + return nil + } + node := list[i] + i++ + return node + } + } + return b.iterator() +} + +func (b *booleanQuery) Evaluate(t iterator) interface{} { + m := b.Left.Evaluate(t) + left := asBool(t, m) + if b.IsOr && left { + return true + } else if !b.IsOr && !left { + return false + } + m = b.Right.Evaluate(t) + return asBool(t, m) +} + +func (b *booleanQuery) Clone() query { + return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()} +} + +type unionQuery struct { + Left, Right query + iterator func() 
NodeNavigator +} + +func (u *unionQuery) Select(t iterator) NodeNavigator { + if u.iterator == nil { + var list []NodeNavigator + var i int + root := t.Current().Copy() + for { + node := u.Left.Select(t) + if node == nil { + break + } + node = node.Copy() + list = append(list, node) + } + t.Current().MoveTo(root) + for { + node := u.Right.Select(t) + if node == nil { + break + } + node = node.Copy() + var exists bool + for _, x := range list { + if reflect.DeepEqual(x, node) { + exists = true + break + } + } + if !exists { + list = append(list, node) + } + } + u.iterator = func() NodeNavigator { + if i >= len(list) { + return nil + } + node := list[i] + i++ + return node + } + } + return u.iterator() +} + +func (u *unionQuery) Evaluate(t iterator) interface{} { + u.iterator = nil + u.Left.Evaluate(t) + u.Right.Evaluate(t) + return u +} + +func (u *unionQuery) Clone() query { + return &unionQuery{Left: u.Left.Clone(), Right: u.Right.Clone()} +} + +func getNodePosition(q query) int { + type Position interface { + position() int + } + if count, ok := q.(Position); ok { + return count.position() + } + return 1 +} diff --git a/vendor/github.com/antchfx/xpath/xpath.go b/vendor/github.com/antchfx/xpath/xpath.go new file mode 100644 index 0000000..7e3f52c --- /dev/null +++ b/vendor/github.com/antchfx/xpath/xpath.go @@ -0,0 +1,157 @@ +package xpath + +import ( + "errors" +) + +// NodeType represents a type of XPath node. +type NodeType int + +const ( + // RootNode is a root node of the XML document or node tree. + RootNode NodeType = iota + + // ElementNode is an element, such as . + ElementNode + + // AttributeNode is an attribute, such as id='123'. + AttributeNode + + // TextNode is the text content of a node. + TextNode + + // CommentNode is a comment node, such as + CommentNode + + // allNode is any types of node, used by xpath package only to predicate match. + allNode +) + +// NodeNavigator provides cursor model for navigating XML data. +type NodeNavigator interface { + // NodeType returns the XPathNodeType of the current node. + NodeType() NodeType + + // LocalName gets the Name of the current node. + LocalName() string + + // Prefix returns namespace prefix associated with the current node. + Prefix() string + + // Value gets the value of current node. + Value() string + + // Copy does a deep copy of the NodeNavigator and all its components. + Copy() NodeNavigator + + // MoveToRoot moves the NodeNavigator to the root node of the current node. + MoveToRoot() + + // MoveToParent moves the NodeNavigator to the parent node of the current node. + MoveToParent() bool + + // MoveToNextAttribute moves the NodeNavigator to the next attribute on current node. + MoveToNextAttribute() bool + + // MoveToChild moves the NodeNavigator to the first child node of the current node. + MoveToChild() bool + + // MoveToFirst moves the NodeNavigator to the first sibling node of the current node. + MoveToFirst() bool + + // MoveToNext moves the NodeNavigator to the next sibling node of the current node. + MoveToNext() bool + + // MoveToPrevious moves the NodeNavigator to the previous sibling node of the current node. + MoveToPrevious() bool + + // MoveTo moves the NodeNavigator to the same position as the specified NodeNavigator. + MoveTo(NodeNavigator) bool +} + +// NodeIterator holds all matched Node object. +type NodeIterator struct { + node NodeNavigator + query query +} + +// Current returns current node which matched. 
+func (t *NodeIterator) Current() NodeNavigator { + return t.node +} + +// MoveNext moves Navigator to the next match node. +func (t *NodeIterator) MoveNext() bool { + n := t.query.Select(t) + if n != nil { + if !t.node.MoveTo(n) { + t.node = n.Copy() + } + return true + } + return false +} + +// Select selects a node set using the specified XPath expression. +// This method is deprecated, recommend using Expr.Select() method instead. +func Select(root NodeNavigator, expr string) *NodeIterator { + exp, err := Compile(expr) + if err != nil { + panic(err) + } + return exp.Select(root) +} + +// Expr is an XPath expression for query. +type Expr struct { + s string + q query +} + +type iteratorFunc func() NodeNavigator + +func (f iteratorFunc) Current() NodeNavigator { + return f() +} + +// Evaluate returns the result of the expression. +// The result type of the expression is one of the follow: bool,float64,string,NodeIterator). +func (expr *Expr) Evaluate(root NodeNavigator) interface{} { + val := expr.q.Evaluate(iteratorFunc(func() NodeNavigator { return root })) + switch val.(type) { + case query: + return &NodeIterator{query: expr.q.Clone(), node: root} + } + return val +} + +// Select selects a node set using the specified XPath expression. +func (expr *Expr) Select(root NodeNavigator) *NodeIterator { + return &NodeIterator{query: expr.q.Clone(), node: root} +} + +// String returns XPath expression string. +func (expr *Expr) String() string { + return expr.s +} + +// Compile compiles an XPath expression string. +func Compile(expr string) (*Expr, error) { + if expr == "" { + return nil, errors.New("expr expression is nil") + } + qy, err := build(expr) + if err != nil { + return nil, err + } + return &Expr{s: expr, q: qy}, nil +} + +// MustCompile compiles an XPath expression string and ignored error. +func MustCompile(expr string) *Expr { + exp, err := Compile(expr) + if err != nil { + return nil + } + return exp +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go index 5765acb..97843b2 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -1,20 +1,12 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: gogo.proto -/* -Package gogoproto is a generated protocol buffer package. - -It is generated from these files: - gogo.proto - -It has these top-level messages: -*/ -package gogoproto +package gogoproto // import "github.com/gogo/protobuf/gogoproto" import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -28,7 +20,7 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*bool)(nil), Field: 62001, Name: "gogoproto.goproto_enum_prefix", @@ -37,7 +29,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ } var E_GoprotoEnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*bool)(nil), Field: 62021, Name: "gogoproto.goproto_enum_stringer", @@ -46,7 +38,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{ } var E_EnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*bool)(nil), Field: 62022, Name: "gogoproto.enum_stringer", @@ -55,7 +47,7 @@ var E_EnumStringer = &proto.ExtensionDesc{ } var E_EnumCustomname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*string)(nil), Field: 62023, Name: "gogoproto.enum_customname", @@ -64,7 +56,7 @@ var E_EnumCustomname = &proto.ExtensionDesc{ } var E_Enumdecl = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtendedType: (*descriptor.EnumOptions)(nil), ExtensionType: (*bool)(nil), Field: 62024, Name: "gogoproto.enumdecl", @@ -73,7 +65,7 @@ var E_Enumdecl = &proto.ExtensionDesc{ } var E_EnumvalueCustomname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.EnumValueOptions)(nil), + ExtendedType: (*descriptor.EnumValueOptions)(nil), ExtensionType: (*string)(nil), Field: 66001, Name: "gogoproto.enumvalue_customname", @@ -82,7 +74,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{ } var E_GoprotoGettersAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63001, Name: "gogoproto.goproto_getters_all", @@ -91,7 +83,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{ } var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63002, Name: "gogoproto.goproto_enum_prefix_all", @@ -100,7 +92,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ } var E_GoprotoStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63003, Name: "gogoproto.goproto_stringer_all", @@ -109,7 +101,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{ } var E_VerboseEqualAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63004, Name: "gogoproto.verbose_equal_all", @@ -118,7 +110,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{ } var E_FaceAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63005, Name: "gogoproto.face_all", @@ -127,7 +119,7 @@ var E_FaceAll = &proto.ExtensionDesc{ } var E_GostringAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), 
Field: 63006, Name: "gogoproto.gostring_all", @@ -136,7 +128,7 @@ var E_GostringAll = &proto.ExtensionDesc{ } var E_PopulateAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63007, Name: "gogoproto.populate_all", @@ -145,7 +137,7 @@ var E_PopulateAll = &proto.ExtensionDesc{ } var E_StringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63008, Name: "gogoproto.stringer_all", @@ -154,7 +146,7 @@ var E_StringerAll = &proto.ExtensionDesc{ } var E_OnlyoneAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63009, Name: "gogoproto.onlyone_all", @@ -163,7 +155,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{ } var E_EqualAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63013, Name: "gogoproto.equal_all", @@ -172,7 +164,7 @@ var E_EqualAll = &proto.ExtensionDesc{ } var E_DescriptionAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63014, Name: "gogoproto.description_all", @@ -181,7 +173,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{ } var E_TestgenAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63015, Name: "gogoproto.testgen_all", @@ -190,7 +182,7 @@ var E_TestgenAll = &proto.ExtensionDesc{ } var E_BenchgenAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63016, Name: "gogoproto.benchgen_all", @@ -199,7 +191,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{ } var E_MarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63017, Name: "gogoproto.marshaler_all", @@ -208,7 +200,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{ } var E_UnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63018, Name: "gogoproto.unmarshaler_all", @@ -217,7 +209,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{ } var E_StableMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63019, Name: "gogoproto.stable_marshaler_all", @@ -226,7 +218,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{ } var E_SizerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63020, Name: "gogoproto.sizer_all", @@ -235,7 +227,7 @@ var E_SizerAll = &proto.ExtensionDesc{ } var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63021, Name: "gogoproto.goproto_enum_stringer_all", @@ -244,7 +236,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ } var 
E_EnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63022, Name: "gogoproto.enum_stringer_all", @@ -253,7 +245,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{ } var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63023, Name: "gogoproto.unsafe_marshaler_all", @@ -262,7 +254,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ } var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63024, Name: "gogoproto.unsafe_unmarshaler_all", @@ -271,7 +263,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ } var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63025, Name: "gogoproto.goproto_extensions_map_all", @@ -280,7 +272,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ } var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63026, Name: "gogoproto.goproto_unrecognized_all", @@ -289,7 +281,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ } var E_GogoprotoImport = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63027, Name: "gogoproto.gogoproto_import", @@ -298,7 +290,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{ } var E_ProtosizerAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63028, Name: "gogoproto.protosizer_all", @@ -307,7 +299,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{ } var E_CompareAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63029, Name: "gogoproto.compare_all", @@ -316,7 +308,7 @@ var E_CompareAll = &proto.ExtensionDesc{ } var E_TypedeclAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63030, Name: "gogoproto.typedecl_all", @@ -325,7 +317,7 @@ var E_TypedeclAll = &proto.ExtensionDesc{ } var E_EnumdeclAll = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63031, Name: "gogoproto.enumdecl_all", @@ -334,7 +326,7 @@ var E_EnumdeclAll = &proto.ExtensionDesc{ } var E_GoprotoRegistration = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtendedType: (*descriptor.FileOptions)(nil), ExtensionType: (*bool)(nil), Field: 63032, Name: "gogoproto.goproto_registration", @@ -342,8 +334,17 @@ var E_GoprotoRegistration = &proto.ExtensionDesc{ Filename: "gogo.proto", } +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all,json=messagenameAll", + Filename: 
"gogo.proto", +} + var E_GoprotoGetters = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64001, Name: "gogoproto.goproto_getters", @@ -352,7 +353,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{ } var E_GoprotoStringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64003, Name: "gogoproto.goproto_stringer", @@ -361,7 +362,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{ } var E_VerboseEqual = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64004, Name: "gogoproto.verbose_equal", @@ -370,7 +371,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{ } var E_Face = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64005, Name: "gogoproto.face", @@ -379,7 +380,7 @@ var E_Face = &proto.ExtensionDesc{ } var E_Gostring = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64006, Name: "gogoproto.gostring", @@ -388,7 +389,7 @@ var E_Gostring = &proto.ExtensionDesc{ } var E_Populate = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64007, Name: "gogoproto.populate", @@ -397,7 +398,7 @@ var E_Populate = &proto.ExtensionDesc{ } var E_Stringer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 67008, Name: "gogoproto.stringer", @@ -406,7 +407,7 @@ var E_Stringer = &proto.ExtensionDesc{ } var E_Onlyone = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64009, Name: "gogoproto.onlyone", @@ -415,7 +416,7 @@ var E_Onlyone = &proto.ExtensionDesc{ } var E_Equal = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64013, Name: "gogoproto.equal", @@ -424,7 +425,7 @@ var E_Equal = &proto.ExtensionDesc{ } var E_Description = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64014, Name: "gogoproto.description", @@ -433,7 +434,7 @@ var E_Description = &proto.ExtensionDesc{ } var E_Testgen = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64015, Name: "gogoproto.testgen", @@ -442,7 +443,7 @@ var E_Testgen = &proto.ExtensionDesc{ } var E_Benchgen = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64016, Name: "gogoproto.benchgen", @@ -451,7 +452,7 @@ var E_Benchgen = &proto.ExtensionDesc{ } var E_Marshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: 
(*bool)(nil), Field: 64017, Name: "gogoproto.marshaler", @@ -460,7 +461,7 @@ var E_Marshaler = &proto.ExtensionDesc{ } var E_Unmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64018, Name: "gogoproto.unmarshaler", @@ -469,7 +470,7 @@ var E_Unmarshaler = &proto.ExtensionDesc{ } var E_StableMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64019, Name: "gogoproto.stable_marshaler", @@ -478,7 +479,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{ } var E_Sizer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64020, Name: "gogoproto.sizer", @@ -487,7 +488,7 @@ var E_Sizer = &proto.ExtensionDesc{ } var E_UnsafeMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64023, Name: "gogoproto.unsafe_marshaler", @@ -496,7 +497,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{ } var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64024, Name: "gogoproto.unsafe_unmarshaler", @@ -505,7 +506,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ } var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64025, Name: "gogoproto.goproto_extensions_map", @@ -514,7 +515,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ } var E_GoprotoUnrecognized = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64026, Name: "gogoproto.goproto_unrecognized", @@ -523,7 +524,7 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{ } var E_Protosizer = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64028, Name: "gogoproto.protosizer", @@ -532,7 +533,7 @@ var E_Protosizer = &proto.ExtensionDesc{ } var E_Compare = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64029, Name: "gogoproto.compare", @@ -541,7 +542,7 @@ var E_Compare = &proto.ExtensionDesc{ } var E_Typedecl = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtendedType: (*descriptor.MessageOptions)(nil), ExtensionType: (*bool)(nil), Field: 64030, Name: "gogoproto.typedecl", @@ -549,8 +550,17 @@ var E_Typedecl = &proto.ExtensionDesc{ Filename: "gogo.proto", } +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), Field: 65001, Name: "gogoproto.nullable", @@ -559,7 +569,7 @@ 
var E_Nullable = &proto.ExtensionDesc{ } var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), Field: 65002, Name: "gogoproto.embed", @@ -568,7 +578,7 @@ var E_Embed = &proto.ExtensionDesc{ } var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65003, Name: "gogoproto.customtype", @@ -577,7 +587,7 @@ var E_Customtype = &proto.ExtensionDesc{ } var E_Customname = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65004, Name: "gogoproto.customname", @@ -586,7 +596,7 @@ var E_Customname = &proto.ExtensionDesc{ } var E_Jsontag = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65005, Name: "gogoproto.jsontag", @@ -595,7 +605,7 @@ var E_Jsontag = &proto.ExtensionDesc{ } var E_Moretags = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65006, Name: "gogoproto.moretags", @@ -604,7 +614,7 @@ var E_Moretags = &proto.ExtensionDesc{ } var E_Casttype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65007, Name: "gogoproto.casttype", @@ -613,7 +623,7 @@ var E_Casttype = &proto.ExtensionDesc{ } var E_Castkey = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65008, Name: "gogoproto.castkey", @@ -622,7 +632,7 @@ var E_Castkey = &proto.ExtensionDesc{ } var E_Castvalue = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*string)(nil), Field: 65009, Name: "gogoproto.castvalue", @@ -631,7 +641,7 @@ var E_Castvalue = &proto.ExtensionDesc{ } var E_Stdtime = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), Field: 65010, Name: "gogoproto.stdtime", @@ -640,7 +650,7 @@ var E_Stdtime = &proto.ExtensionDesc{ } var E_Stdduration = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtendedType: (*descriptor.FieldOptions)(nil), ExtensionType: (*bool)(nil), Field: 65011, Name: "gogoproto.stdduration", @@ -684,6 +694,7 @@ func init() { proto.RegisterExtension(E_TypedeclAll) proto.RegisterExtension(E_EnumdeclAll) proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) proto.RegisterExtension(E_GoprotoGetters) proto.RegisterExtension(E_GoprotoStringer) proto.RegisterExtension(E_VerboseEqual) @@ -707,6 +718,7 @@ func init() { proto.RegisterExtension(E_Protosizer) proto.RegisterExtension(E_Compare) proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) proto.RegisterExtension(E_Nullable) proto.RegisterExtension(E_Embed) proto.RegisterExtension(E_Customtype) @@ -720,85 +732,86 @@ func init() { proto.RegisterExtension(E_Stdduration) } -func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } - -var fileDescriptorGogo = []byte{ - // 1220 bytes 
of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x4b, 0x6f, 0x1c, 0x45, - 0x10, 0x80, 0x85, 0x48, 0x14, 0x6f, 0xd9, 0x8e, 0xf1, 0xda, 0x98, 0x10, 0x81, 0x08, 0x9c, 0x38, - 0xd9, 0xa7, 0x08, 0xa5, 0xad, 0xc8, 0x72, 0x2c, 0xc7, 0x4a, 0x84, 0xc1, 0x98, 0x38, 0xbc, 0x0e, - 0xab, 0xd9, 0xdd, 0xf6, 0x78, 0x60, 0x66, 0x7a, 0x98, 0xe9, 0x89, 0xe2, 0xdc, 0x50, 0x78, 0x08, - 0x21, 0xde, 0x48, 0x90, 0x90, 0x04, 0x38, 0xf0, 0x7e, 0x86, 0xf7, 0x91, 0x0b, 0x8f, 0x2b, 0xff, - 0x81, 0x0b, 0x60, 0xde, 0xbe, 0xf9, 0x82, 0x6a, 0xb6, 0x6a, 0xb6, 0x67, 0xbd, 0x52, 0xf7, 0xde, - 0xc6, 0xeb, 0xfe, 0xbe, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x59, 0x00, 0x5f, 0xf9, 0x6a, 0x3a, 0x49, - 0x95, 0x56, 0xf5, 0x1a, 0x5e, 0x17, 0x97, 0x07, 0x0f, 0xf9, 0x4a, 0xf9, 0xa1, 0x9c, 0x29, 0xfe, - 0x6a, 0xe6, 0xeb, 0x33, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, 0xc1, - 0x04, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0xd7, 0x83, 0xb3, 0xf5, 0x9b, 0xa6, 0x3b, - 0xe4, 0x34, 0x93, 0xd3, 0x8b, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0x81, 0xab, 0xbf, - 0x5c, 0x7b, 0xe8, 0x9a, 0xdb, 0x87, 0x56, 0xc7, 0x09, 0xc5, 0xff, 0xad, 0x14, 0xa0, 0x58, 0x85, - 0xeb, 0x2b, 0xbe, 0x4c, 0xa7, 0x41, 0xec, 0xcb, 0xd4, 0x62, 0xfc, 0x9e, 0x8c, 0x13, 0x86, 0xf1, - 0x5e, 0x42, 0xc5, 0x02, 0x8c, 0x0e, 0xe2, 0xfa, 0x81, 0x5c, 0x23, 0xd2, 0x94, 0x2c, 0xc1, 0x58, - 0x21, 0x69, 0xe5, 0x99, 0x56, 0x51, 0xec, 0x45, 0xd2, 0xa2, 0xf9, 0xb1, 0xd0, 0xd4, 0x56, 0xf7, - 0x23, 0xb6, 0x50, 0x52, 0x42, 0xc0, 0x10, 0x7e, 0xd2, 0x96, 0xad, 0xd0, 0x62, 0xf8, 0x89, 0x02, - 0x29, 0xd7, 0x8b, 0xd3, 0x30, 0x89, 0xd7, 0x67, 0xbc, 0x30, 0x97, 0x66, 0x24, 0xb7, 0xf6, 0xf5, - 0x9c, 0xc6, 0x65, 0x2c, 0xfb, 0xf9, 0xfc, 0x9e, 0x22, 0x9c, 0x89, 0x52, 0x60, 0xc4, 0x64, 0x54, - 0xd1, 0x97, 0x5a, 0xcb, 0x34, 0x6b, 0x78, 0x61, 0xbf, 0xf0, 0x8e, 0x07, 0x61, 0x69, 0xbc, 0xb0, - 0x55, 0xad, 0xe2, 0x52, 0x87, 0x9c, 0x0f, 0x43, 0xb1, 0x06, 0x37, 0xf4, 0x79, 0x2a, 0x1c, 0x9c, - 0x17, 0xc9, 0x39, 0xb9, 0xeb, 0xc9, 0x40, 0xed, 0x0a, 0xf0, 0xe7, 0x65, 0x2d, 0x1d, 0x9c, 0xaf, - 0x93, 0xb3, 0x4e, 0x2c, 0x97, 0x14, 0x8d, 0x27, 0x61, 0xfc, 0x8c, 0x4c, 0x9b, 0x2a, 0x93, 0x0d, - 0xf9, 0x68, 0xee, 0x85, 0x0e, 0xba, 0x4b, 0xa4, 0x1b, 0x23, 0x70, 0x11, 0x39, 0x74, 0x1d, 0x81, - 0xa1, 0x75, 0xaf, 0x25, 0x1d, 0x14, 0x97, 0x49, 0xb1, 0x0f, 0xd7, 0x23, 0x3a, 0x0f, 0x23, 0xbe, - 0xea, 0xdc, 0x92, 0x03, 0x7e, 0x85, 0xf0, 0x61, 0x66, 0x48, 0x91, 0xa8, 0x24, 0x0f, 0x3d, 0xed, - 0x12, 0xc1, 0x1b, 0xac, 0x60, 0x86, 0x14, 0x03, 0xa4, 0xf5, 0x4d, 0x56, 0x64, 0x46, 0x3e, 0xe7, - 0x60, 0x58, 0xc5, 0xe1, 0xa6, 0x8a, 0x5d, 0x82, 0x78, 0x8b, 0x0c, 0x40, 0x08, 0x0a, 0x66, 0xa1, - 0xe6, 0x5a, 0x88, 0xb7, 0xb7, 0x78, 0x7b, 0x70, 0x05, 0x96, 0x60, 0x8c, 0x1b, 0x54, 0xa0, 0x62, - 0x07, 0xc5, 0x3b, 0xa4, 0xd8, 0x6f, 0x60, 0x74, 0x1b, 0x5a, 0x66, 0xda, 0x97, 0x2e, 0x92, 0x77, - 0xf9, 0x36, 0x08, 0xa1, 0x54, 0x36, 0x65, 0xdc, 0xda, 0x70, 0x33, 0xbc, 0xc7, 0xa9, 0x64, 0x06, - 0x15, 0x0b, 0x30, 0x1a, 0x79, 0x69, 0xb6, 0xe1, 0x85, 0x4e, 0xe5, 0x78, 0x9f, 0x1c, 0x23, 0x25, - 0x44, 0x19, 0xc9, 0xe3, 0x41, 0x34, 0x1f, 0x70, 0x46, 0x0c, 0x8c, 0xb6, 0x5e, 0xa6, 0xbd, 0x66, - 0x28, 0x1b, 0x83, 0xd8, 0x3e, 0xe4, 0xad, 0xd7, 0x61, 0x97, 0x4d, 0xe3, 0x2c, 0xd4, 0xb2, 0xe0, - 0x9c, 0x93, 0xe6, 0x23, 0xae, 0x74, 0x01, 0x20, 0xfc, 0x00, 0xdc, 0xd8, 0x77, 0x4c, 0x38, 0xc8, - 0x3e, 0x26, 0xd9, 0x54, 0x9f, 0x51, 0x41, 0x2d, 0x61, 0x50, 0xe5, 0x27, 0xdc, 0x12, 0x64, 0x8f, - 0x6b, 0x05, 0x26, 0xf3, 0x38, 0xf3, 0xd6, 0x07, 0xcb, 0xda, 0xa7, 0x9c, 0xb5, 0x0e, 0x5b, 
0xc9, - 0xda, 0x29, 0x98, 0x22, 0xe3, 0x60, 0x75, 0xfd, 0x8c, 0x1b, 0x6b, 0x87, 0x5e, 0xab, 0x56, 0xf7, - 0x21, 0x38, 0x58, 0xa6, 0xf3, 0xac, 0x96, 0x71, 0x86, 0x4c, 0x23, 0xf2, 0x12, 0x07, 0xf3, 0x55, - 0x32, 0x73, 0xc7, 0x5f, 0x2c, 0x05, 0xcb, 0x5e, 0x82, 0xf2, 0xfb, 0xe1, 0x00, 0xcb, 0xf3, 0x38, - 0x95, 0x2d, 0xe5, 0xc7, 0xc1, 0x39, 0xd9, 0x76, 0x50, 0x7f, 0xde, 0x53, 0xaa, 0x35, 0x03, 0x47, - 0xf3, 0x09, 0xb8, 0xae, 0x3c, 0xab, 0x34, 0x82, 0x28, 0x51, 0xa9, 0xb6, 0x18, 0xbf, 0xe0, 0x4a, - 0x95, 0xdc, 0x89, 0x02, 0x13, 0x8b, 0xb0, 0xbf, 0xf8, 0xd3, 0xf5, 0x91, 0xfc, 0x92, 0x44, 0xa3, - 0x5d, 0x8a, 0x1a, 0x47, 0x4b, 0x45, 0x89, 0x97, 0xba, 0xf4, 0xbf, 0xaf, 0xb8, 0x71, 0x10, 0x42, - 0x8d, 0x43, 0x6f, 0x26, 0x12, 0xa7, 0xbd, 0x83, 0xe1, 0x6b, 0x6e, 0x1c, 0xcc, 0x90, 0x82, 0x0f, - 0x0c, 0x0e, 0x8a, 0x6f, 0x58, 0xc1, 0x0c, 0x2a, 0xee, 0xe9, 0x0e, 0xda, 0x54, 0xfa, 0x41, 0xa6, - 0x53, 0x0f, 0x57, 0x5b, 0x54, 0xdf, 0x6e, 0x55, 0x0f, 0x61, 0xab, 0x06, 0x2a, 0x4e, 0xc2, 0x58, - 0xcf, 0x11, 0xa3, 0x7e, 0xcb, 0x2e, 0xdb, 0xb2, 0xcc, 0x32, 0xcf, 0x2f, 0x85, 0x8f, 0x6d, 0x53, - 0x33, 0xaa, 0x9e, 0x30, 0xc4, 0x9d, 0x58, 0xf7, 0xea, 0x39, 0xc0, 0x2e, 0x3b, 0xbf, 0x5d, 0x96, - 0xbe, 0x72, 0x0c, 0x10, 0xc7, 0x61, 0xb4, 0x72, 0x06, 0xb0, 0xab, 0x1e, 0x27, 0xd5, 0x88, 0x79, - 0x04, 0x10, 0x87, 0x61, 0x0f, 0xce, 0x73, 0x3b, 0xfe, 0x04, 0xe1, 0xc5, 0x72, 0x71, 0x14, 0x86, - 0x78, 0x8e, 0xdb, 0xd1, 0x27, 0x09, 0x2d, 0x11, 0xc4, 0x79, 0x86, 0xdb, 0xf1, 0xa7, 0x18, 0x67, - 0x04, 0x71, 0xf7, 0x14, 0x7e, 0xf7, 0xcc, 0x1e, 0xea, 0xc3, 0x9c, 0xbb, 0x59, 0xd8, 0x47, 0xc3, - 0xdb, 0x4e, 0x3f, 0x4d, 0x5f, 0xce, 0x84, 0xb8, 0x03, 0xf6, 0x3a, 0x26, 0xfc, 0x59, 0x42, 0x3b, - 0xeb, 0xc5, 0x02, 0x0c, 0x1b, 0x03, 0xdb, 0x8e, 0x3f, 0x47, 0xb8, 0x49, 0x61, 0xe8, 0x34, 0xb0, - 0xed, 0x82, 0xe7, 0x39, 0x74, 0x22, 0x30, 0x6d, 0x3c, 0xab, 0xed, 0xf4, 0x0b, 0x9c, 0x75, 0x46, - 0xc4, 0x1c, 0xd4, 0xca, 0xfe, 0x6b, 0xe7, 0x5f, 0x24, 0xbe, 0xcb, 0x60, 0x06, 0x8c, 0xfe, 0x6f, - 0x57, 0xbc, 0xc4, 0x19, 0x30, 0x28, 0xdc, 0x46, 0xbd, 0x33, 0xdd, 0x6e, 0x7a, 0x99, 0xb7, 0x51, - 0xcf, 0x48, 0xc7, 0x6a, 0x16, 0x6d, 0xd0, 0xae, 0x78, 0x85, 0xab, 0x59, 0xac, 0xc7, 0x30, 0x7a, - 0x87, 0xa4, 0xdd, 0xf1, 0x2a, 0x87, 0xd1, 0x33, 0x23, 0xc5, 0x0a, 0xd4, 0x77, 0x0f, 0x48, 0xbb, - 0xef, 0x35, 0xf2, 0x8d, 0xef, 0x9a, 0x8f, 0xe2, 0x3e, 0x98, 0xea, 0x3f, 0x1c, 0xed, 0xd6, 0x0b, - 0xdb, 0x3d, 0xaf, 0x33, 0xe6, 0x6c, 0x14, 0xa7, 0xba, 0x5d, 0xd6, 0x1c, 0x8c, 0x76, 0xed, 0xc5, - 0xed, 0x6a, 0xa3, 0x35, 0xe7, 0xa2, 0x98, 0x07, 0xe8, 0xce, 0x24, 0xbb, 0xeb, 0x12, 0xb9, 0x0c, - 0x08, 0xb7, 0x06, 0x8d, 0x24, 0x3b, 0x7f, 0x99, 0xb7, 0x06, 0x11, 0xb8, 0x35, 0x78, 0x1a, 0xd9, - 0xe9, 0x2b, 0xbc, 0x35, 0x18, 0x11, 0xb3, 0x30, 0x14, 0xe7, 0x61, 0x88, 0xcf, 0x56, 0xfd, 0xe6, - 0x3e, 0xe3, 0x46, 0x86, 0x6d, 0x86, 0x7f, 0xdd, 0x21, 0x98, 0x01, 0x71, 0x18, 0xf6, 0xca, 0xa8, - 0x29, 0xdb, 0x36, 0xf2, 0xb7, 0x1d, 0xee, 0x27, 0xb8, 0x5a, 0xcc, 0x01, 0x74, 0x5e, 0xa6, 0x31, - 0x0a, 0x1b, 0xfb, 0xfb, 0x4e, 0xe7, 0xbd, 0xde, 0x40, 0xba, 0x82, 0xe2, 0x6d, 0xdc, 0x22, 0xd8, - 0xaa, 0x0a, 0x8a, 0x17, 0xf0, 0x23, 0xb0, 0xef, 0xe1, 0x4c, 0xc5, 0xda, 0xf3, 0x6d, 0xf4, 0x1f, - 0x44, 0xf3, 0x7a, 0x4c, 0x58, 0xa4, 0x52, 0xa9, 0x3d, 0x3f, 0xb3, 0xb1, 0x7f, 0x12, 0x5b, 0x02, - 0x08, 0xb7, 0xbc, 0x4c, 0xbb, 0xdc, 0xf7, 0x5f, 0x0c, 0x33, 0x80, 0x41, 0xe3, 0xf5, 0x23, 0x72, - 0xd3, 0xc6, 0xfe, 0xcd, 0x41, 0xd3, 0x7a, 0x71, 0x14, 0x6a, 0x78, 0x59, 0xfc, 0x0e, 0x61, 0x83, - 0xff, 0x21, 0xb8, 0x4b, 0xe0, 0x37, 0x67, 0xba, 0xad, 0x03, 0x7b, 0xb2, 0xff, 0xa5, 0x4a, 0xf3, - 0x7a, 0x31, 0x0f, 
0xc3, 0x99, 0x6e, 0xb7, 0x73, 0x3a, 0xd1, 0x58, 0xf0, 0xff, 0x76, 0xca, 0x97, - 0xdc, 0x92, 0x39, 0xb6, 0x08, 0x13, 0x2d, 0x15, 0xf5, 0x82, 0xc7, 0x60, 0x49, 0x2d, 0xa9, 0x95, - 0x62, 0x17, 0x3d, 0x78, 0x9b, 0x1f, 0xe8, 0x8d, 0xbc, 0x39, 0xdd, 0x52, 0xd1, 0x0c, 0x1e, 0x35, - 0xbb, 0xbf, 0xa0, 0x95, 0x07, 0xcf, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xed, 0x5f, 0x6c, 0x20, - 0x74, 0x13, 0x00, 0x00, +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_gogo_68790841c0f79064) } + +var fileDescriptor_gogo_68790841c0f79064 = []byte{ + // 1246 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x70, 0x64, 0xcf, 0xf3, 0x86, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x06, 0x63, 0xe2, 0xb0, 0x1d, 0x46, + 0x3d, 0x33, 0xe5, 0x76, 0x43, 0x77, 0xd7, 0xd0, 0x5d, 0x1d, 0xc5, 0xb9, 0xa1, 0xb0, 0x08, 0x21, + 0x76, 0x24, 0x48, 0x48, 0x02, 0x39, 0xb0, 0xaf, 0x61, 0xe7, 0xc6, 0x85, 0xe5, 0xca, 0x7f, 0xe0, + 0x02, 0x98, 0xdd, 0x37, 0x5f, 0xa2, 0xd7, 0xfd, 0x5e, 0x4f, 0xcd, 0x78, 0xa4, 0xaa, 0xb9, 0xb5, + 0xed, 0xfa, 0x3e, 0x57, 0xbf, 0x57, 0xf5, 0xde, 0x9b, 0x01, 0xf0, 0x95, 0xaf, 0x66, 0x5a, 0x89, + 0xd2, 0xaa, 0x5a, 0xc1, 0xe7, 0xfc, 0xf1, 0xc0, 0x41, 0x5f, 0x29, 0x3f, 0x94, 0xb3, 0xf9, 0x4f, + 0xf5, 0x6c, 0x63, 0xb6, 0x29, 0xd3, 0x46, 0x12, 0xb4, 0xb4, 0x4a, 0x8a, 0xc5, 0xe2, 0x6e, 0x98, + 0xa4, 0xc5, 0x35, 0x19, 0x67, 0x51, 0xad, 0x95, 0xc8, 0x8d, 0xe0, 0x74, 0xf5, 0xa6, 0x99, 0x82, + 0x9c, 0x61, 0x72, 0x66, 0x29, 0xce, 0xa2, 0x7b, 0x5a, 0x3a, 0x50, 0x71, 0xba, 0xff, 0xca, 0xaf, + 0xd7, 0x1e, 0xbc, 0xe6, 0xf6, 0xa1, 0xb5, 0x09, 0x42, 0xf1, 0x6f, 0xab, 0x39, 0x28, 0xd6, 0xe0, + 0xfa, 0x0e, 0x5f, 0xaa, 0x93, 0x20, 0xf6, 0x65, 0x62, 0x31, 0xfe, 0x40, 0xc6, 0x49, 0xc3, 0x78, + 0x1f, 0xa1, 0x62, 0x11, 0x46, 0xfb, 0x71, 0xfd, 0x48, 0xae, 0x11, 0x69, 0x4a, 0x96, 0x61, 0x3c, + 0x97, 0x34, 0xb2, 0x54, 0xab, 0x28, 0xf6, 0x22, 0x69, 0xd1, 0xfc, 0x94, 0x6b, 0x2a, 0x6b, 0x63, + 0x88, 0x2d, 0x96, 0x94, 0x10, 0x30, 0x84, 0xbf, 0x69, 0xca, 0x46, 0x68, 0x31, 0xfc, 0x4c, 0x1b, + 0x29, 0xd7, 0x8b, 0x93, 0x30, 0x85, 0xcf, 0xa7, 0xbc, 0x30, 0x93, 0xe6, 0x4e, 0x6e, 0xed, 0xe9, + 0x39, 0x89, 0xcb, 0x58, 0xf6, 0xcb, 0xd9, 0x81, 0x7c, 0x3b, 0x93, 0xa5, 0xc0, 0xd8, 0x93, 0x91, + 0x45, 0x5f, 0x6a, 0x2d, 0x93, 0xb4, 0xe6, 0x85, 0xbd, 0xb6, 0x77, 0x2c, 0x08, 0x4b, 0xe3, 0xb9, + 0xed, 0xce, 0x2c, 0x2e, 0x17, 0xe4, 0x42, 0x18, 0x8a, 0x75, 0xb8, 0xa1, 0xc7, 0xa9, 0x70, 0x70, + 0x9e, 0x27, 0xe7, 0xd4, 0x9e, 0x93, 0x81, 0xda, 0x55, 0xe0, 0xdf, 0x97, 0xb9, 0x74, 0x70, 0xbe, + 0x41, 0xce, 0x2a, 0xb1, 0x9c, 0x52, 0x34, 0xde, 0x09, 0x13, 0xa7, 0x64, 0x52, 0x57, 0xa9, 0xac, + 0xc9, 0xc7, 0x32, 0x2f, 0x74, 0xd0, 0x5d, 0x20, 0xdd, 0x38, 0x81, 0x4b, 0xc8, 0xa1, 0xeb, 0x30, + 0x0c, 0x6d, 0x78, 0x0d, 0xe9, 0xa0, 0xb8, 0x48, 0x8a, 0x41, 0x5c, 0x8f, 0xe8, 0x02, 0x8c, 0xf8, + 0xaa, 0x78, 0x25, 0x07, 0xfc, 0x12, 0xe1, 0xc3, 0xcc, 0x90, 0xa2, 0xa5, 0x5a, 0x59, 0xe8, 0x69, + 0x97, 0x1d, 0xbc, 0xc9, 0x0a, 0x66, 0x48, 0xd1, 0x47, 0x58, 0xdf, 0x62, 0x45, 0x6a, 0xc4, 0x73, + 0x1e, 0x86, 0x55, 0x1c, 0x6e, 0xa9, 0xd8, 0x65, 0x13, 0x97, 0xc9, 0x00, 0x84, 0xa0, 0x60, 0x0e, + 0x2a, 0xae, 0x89, 0x78, 0x7b, 0x9b, 0xaf, 0x07, 0x67, 0x60, 0x19, 0xc6, 0xb9, 0x40, 0x05, 0x2a, + 0x76, 0x50, 0xbc, 0x43, 0x8a, 0x31, 0x03, 0xa3, 0xd7, 0xd0, 0x32, 0xd5, 0xbe, 0x74, 0x91, 0xbc, + 0xcb, 0xaf, 0x41, 0x08, 0x85, 0xb2, 0x2e, 0xe3, 0xc6, 0xa6, 0x9b, 0xe1, 0x3d, 0x0e, 0x25, 0x33, + 0xa8, 0x58, 0x84, 0xd1, 
0xc8, 0x4b, 0xd2, 0x4d, 0x2f, 0x74, 0x4a, 0xc7, 0xfb, 0xe4, 0x18, 0x29, + 0x21, 0x8a, 0x48, 0x16, 0xf7, 0xa3, 0xf9, 0x80, 0x23, 0x62, 0x60, 0x74, 0xf5, 0x52, 0xed, 0xd5, + 0x43, 0x59, 0xeb, 0xc7, 0xf6, 0x21, 0x5f, 0xbd, 0x82, 0x5d, 0x31, 0x8d, 0x73, 0x50, 0x49, 0x83, + 0x33, 0x4e, 0x9a, 0x8f, 0x38, 0xd3, 0x39, 0x80, 0xf0, 0x83, 0x70, 0x63, 0xcf, 0x36, 0xe1, 0x20, + 0xfb, 0x98, 0x64, 0xd3, 0x3d, 0x5a, 0x05, 0x95, 0x84, 0x7e, 0x95, 0x9f, 0x70, 0x49, 0x90, 0x5d, + 0xae, 0x55, 0x98, 0xca, 0xe2, 0xd4, 0xdb, 0xe8, 0x2f, 0x6a, 0x9f, 0x72, 0xd4, 0x0a, 0xb6, 0x23, + 0x6a, 0x27, 0x60, 0x9a, 0x8c, 0xfd, 0xe5, 0xf5, 0x33, 0x2e, 0xac, 0x05, 0xbd, 0xde, 0x99, 0xdd, + 0x87, 0xe1, 0x40, 0x19, 0xce, 0xd3, 0x5a, 0xc6, 0x29, 0x32, 0xb5, 0xc8, 0x6b, 0x39, 0x98, 0xaf, + 0x90, 0x99, 0x2b, 0xfe, 0x52, 0x29, 0x58, 0xf1, 0x5a, 0x28, 0x7f, 0x00, 0xf6, 0xb3, 0x3c, 0x8b, + 0x13, 0xd9, 0x50, 0x7e, 0x1c, 0x9c, 0x91, 0x4d, 0x07, 0xf5, 0xe7, 0x5d, 0xa9, 0x5a, 0x37, 0x70, + 0x34, 0x1f, 0x87, 0xeb, 0xca, 0x59, 0xa5, 0x16, 0x44, 0x2d, 0x95, 0x68, 0x8b, 0xf1, 0x0b, 0xce, + 0x54, 0xc9, 0x1d, 0xcf, 0x31, 0xb1, 0x04, 0x63, 0xf9, 0x8f, 0xae, 0x47, 0xf2, 0x4b, 0x12, 0x8d, + 0xb6, 0x29, 0x2a, 0x1c, 0x0d, 0x15, 0xb5, 0xbc, 0xc4, 0xa5, 0xfe, 0x7d, 0xc5, 0x85, 0x83, 0x10, + 0x2a, 0x1c, 0x7a, 0xab, 0x25, 0xb1, 0xdb, 0x3b, 0x18, 0xbe, 0xe6, 0xc2, 0xc1, 0x0c, 0x29, 0x78, + 0x60, 0x70, 0x50, 0x7c, 0xc3, 0x0a, 0x66, 0x50, 0x71, 0x6f, 0xbb, 0xd1, 0x26, 0xd2, 0x0f, 0x52, + 0x9d, 0x78, 0xb8, 0xda, 0xa2, 0xfa, 0x76, 0xbb, 0x73, 0x08, 0x5b, 0x33, 0x50, 0xac, 0x44, 0x91, + 0x4c, 0x53, 0xcf, 0x97, 0x38, 0x71, 0x38, 0x6c, 0xec, 0x3b, 0xae, 0x44, 0x06, 0x56, 0xdc, 0xcf, + 0xf1, 0xae, 0x59, 0xa5, 0x7a, 0xcb, 0x1e, 0xd1, 0x4a, 0xc1, 0xb0, 0xeb, 0xf1, 0x1d, 0x72, 0x75, + 0x8e, 0x2a, 0xe2, 0x2e, 0x3c, 0x40, 0x9d, 0x03, 0x85, 0x5d, 0x76, 0x76, 0xa7, 0x3c, 0x43, 0x1d, + 0xf3, 0x84, 0x38, 0x06, 0xa3, 0x1d, 0xc3, 0x84, 0x5d, 0xf5, 0x04, 0xa9, 0x46, 0xcc, 0x59, 0x42, + 0x1c, 0x82, 0x01, 0x1c, 0x0c, 0xec, 0xf8, 0x93, 0x84, 0xe7, 0xcb, 0xc5, 0x11, 0x18, 0xe2, 0x81, + 0xc0, 0x8e, 0x3e, 0x45, 0x68, 0x89, 0x20, 0xce, 0xc3, 0x80, 0x1d, 0x7f, 0x9a, 0x71, 0x46, 0x10, + 0x77, 0x0f, 0xe1, 0xf7, 0xcf, 0x0e, 0x50, 0x41, 0xe7, 0xd8, 0xcd, 0xc1, 0x20, 0x4d, 0x01, 0x76, + 0xfa, 0x19, 0xfa, 0xe7, 0x4c, 0x88, 0x3b, 0x60, 0x9f, 0x63, 0xc0, 0x9f, 0x23, 0xb4, 0x58, 0x2f, + 0x16, 0x61, 0xd8, 0xe8, 0xfc, 0x76, 0xfc, 0x79, 0xc2, 0x4d, 0x0a, 0xb7, 0x4e, 0x9d, 0xdf, 0x2e, + 0x78, 0x81, 0xb7, 0x4e, 0x04, 0x86, 0x8d, 0x9b, 0xbe, 0x9d, 0x7e, 0x91, 0xa3, 0xce, 0x88, 0x98, + 0x87, 0x4a, 0x59, 0xc8, 0xed, 0xfc, 0x4b, 0xc4, 0xb7, 0x19, 0x8c, 0x80, 0xd1, 0x48, 0xec, 0x8a, + 0x97, 0x39, 0x02, 0x06, 0x85, 0xd7, 0xa8, 0x7b, 0x38, 0xb0, 0x9b, 0x5e, 0xe1, 0x6b, 0xd4, 0x35, + 0x1b, 0x60, 0x36, 0xf3, 0x7a, 0x6a, 0x57, 0xbc, 0xca, 0xd9, 0xcc, 0xd7, 0xe3, 0x36, 0xba, 0xbb, + 0xad, 0xdd, 0xf1, 0x1a, 0x6f, 0xa3, 0xab, 0xd9, 0x8a, 0x55, 0xa8, 0xee, 0xed, 0xb4, 0x76, 0xdf, + 0xeb, 0xe4, 0x9b, 0xd8, 0xd3, 0x68, 0xc5, 0xfd, 0x30, 0xdd, 0xbb, 0xcb, 0xda, 0xad, 0xe7, 0x76, + 0xba, 0x3e, 0x17, 0x99, 0x4d, 0x56, 0x9c, 0x68, 0x97, 0x6b, 0xb3, 0xc3, 0xda, 0xb5, 0xe7, 0x77, + 0x3a, 0x2b, 0xb6, 0xd9, 0x60, 0xc5, 0x02, 0x40, 0xbb, 0xb9, 0xd9, 0x5d, 0x17, 0xc8, 0x65, 0x40, + 0x78, 0x35, 0xa8, 0xb7, 0xd9, 0xf9, 0x8b, 0x7c, 0x35, 0x88, 0xc0, 0xab, 0xc1, 0x6d, 0xcd, 0x4e, + 0x5f, 0xe2, 0xab, 0xc1, 0x08, 0x9e, 0x6c, 0xa3, 0x73, 0xd8, 0x0d, 0x97, 0xf9, 0x64, 0x1b, 0x94, + 0x98, 0x83, 0xa1, 0x38, 0x0b, 0x43, 0x3c, 0xa0, 0xd5, 0x9b, 0x7b, 0xb4, 0x2b, 0x19, 0x36, 0x99, + 0xff, 0x6d, 0x97, 0x76, 0xc0, 0x80, 0x38, 0x04, 
0xfb, 0x64, 0x54, 0x97, 0x4d, 0x1b, 0xf9, 0xfb, + 0x2e, 0x17, 0x25, 0x5c, 0x2d, 0xe6, 0x01, 0x8a, 0x8f, 0xf6, 0xf8, 0x2a, 0x36, 0xf6, 0x8f, 0xdd, + 0xe2, 0x5b, 0x06, 0x03, 0x69, 0x0b, 0xf2, 0x17, 0xb7, 0x08, 0xb6, 0x3b, 0x05, 0xf9, 0x5b, 0x1f, + 0x86, 0xc1, 0x47, 0x52, 0x15, 0x6b, 0xcf, 0xb7, 0xd1, 0x7f, 0x12, 0xcd, 0xeb, 0x31, 0x60, 0x91, + 0x4a, 0xa4, 0xf6, 0xfc, 0xd4, 0xc6, 0xfe, 0x45, 0x6c, 0x09, 0x20, 0xdc, 0xf0, 0x52, 0xed, 0xf2, + 0xde, 0x7f, 0x33, 0xcc, 0x00, 0x6e, 0x1a, 0x9f, 0x1f, 0x95, 0x5b, 0x36, 0xf6, 0x1f, 0xde, 0x34, + 0xad, 0x17, 0x47, 0xa0, 0x82, 0x8f, 0xf9, 0xb7, 0x22, 0x36, 0xf8, 0x5f, 0x82, 0xdb, 0x04, 0xfe, + 0xe7, 0x54, 0x37, 0x75, 0x60, 0x0f, 0xf6, 0x7f, 0x94, 0x69, 0x5e, 0x2f, 0x16, 0x60, 0x38, 0xd5, + 0xcd, 0x66, 0x46, 0xf3, 0x95, 0x05, 0xff, 0x7f, 0xb7, 0xfc, 0xc8, 0x5d, 0x32, 0x47, 0x97, 0x60, + 0xb2, 0xa1, 0xa2, 0x6e, 0xf0, 0x28, 0x2c, 0xab, 0x65, 0xb5, 0x9a, 0x5f, 0xc5, 0x87, 0x6e, 0xf3, + 0x03, 0xbd, 0x99, 0xd5, 0x67, 0x1a, 0x2a, 0x9a, 0xc5, 0xc1, 0xb7, 0xfd, 0x7d, 0x5e, 0x39, 0x06, + 0x5f, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x51, 0xf0, 0xa5, 0x95, 0x02, 0x14, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go index 6b851c5..22910c6 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -334,9 +334,6 @@ func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google } func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - if IsProto3(file) { - return false - } return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) } @@ -355,3 +352,7 @@ func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_proto func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) } + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go index 5d4cba4..a26b046 100644 --- a/vendor/github.com/gogo/protobuf/proto/clone.go +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. 
+ // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -89,7 +113,7 @@ func mergeStruct(out, in reflect.Value) { bIn := emIn.GetExtensions() bOut := emOut.GetExtensions() *bOut = append(*bOut, *bIn...) - } else if emIn, ok := extendable(in.Addr().Interface()); ok { + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 0000000..2455248 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
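Editor's note (illustration only, not part of the diff): the clone.go hunk above rewrites Clone to allocate a fresh message and delegate to Merge, and Merge now prefers a generated Merger/XXX_Merge implementation before falling back to reflection, panicking with the more descriptive "proto.Merge(%T, %T) type mismatch" message. A minimal sketch of the caller-visible behaviour, assuming only the vendored gogo/protobuf packages pinned in this Gopkg.lock; types.StringValue is used purely as a convenient example message:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
)

func main() {
	src := &types.StringValue{Value: "hello"}

	// Clone allocates a new message of the same type and merges src into it.
	dst := proto.Clone(src).(*types.StringValue)
	fmt.Println(dst.Value) // hello

	// Merge copies fields that are set in src into dst; repeated fields
	// would be appended. Mismatched argument types panic instead of merging.
	proto.Merge(dst, &types.StringValue{Value: "world"})
	fmt.Println(dst.Value) // world
}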
+ +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 737f273..d9aa3c4 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. 
+// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,541 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. 
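Editor's note (illustration only, not part of the diff): in the decode.go hunk above, Unmarshal still resets the receiver and now dispatches to a generated XXX_Unmarshal (or a legacy Unmarshaler) before using the buffer-based fallback, while UnmarshalMerge keeps whatever is already in the message. A small sketch of that difference under the same vendored packages; types.StringValue again stands in for any generated message, and error handling is elided:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
)

func main() {
	// A proto3 message holding only default values encodes to zero bytes.
	empty, _ := proto.Marshal(&types.StringValue{})

	dst := &types.StringValue{Value: "keep me"}

	// UnmarshalMerge merges into existing data: nothing in the input sets
	// the field, so the previous value survives.
	_ = proto.UnmarshalMerge(empty, dst)
	fmt.Println(dst.Value) // keep me

	// Unmarshal calls Reset first, so the field is cleared before decoding.
	_ = proto.Unmarshal(empty, dst)
	fmt.Printf("%q\n", dst.Value) // ""
}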
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { - if isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - ext := e.GetExtensions() - *ext = append(*ext, o.buf[oi:o.index]...) - } - continue - } - } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. 
- return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). 
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. 
- for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. 
+ // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go deleted file mode 100644 index 6fb74de..0000000 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ /dev/null @@ -1,172 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "reflect" -) - -// Decode a reference to a struct pointer. -func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is a pointer receiver") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - bas := structPointer_FieldPointer(base, p.field) - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers ([]struct). -func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { - newBas := appendStructPointer(base, p.field, p.sstype) - - if is_group { - panic("not supported, maybe in future, if requested.") - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - panic("not supported, since this is not a pointer receiver.") - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) - - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of references to struct pointers. 
-func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_ref_struct(p, false, base) -} - -func setPtrCustomType(base structPointer, f field, v interface{}) { - if v == nil { - return - } - structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) -} - -func setCustomType(base structPointer, f field, value interface{}) { - if value == nil { - return - } - v := reflect.ValueOf(value).Elem() - t := reflect.TypeOf(value).Elem() - kind := t.Kind() - switch kind { - case reflect.Slice: - slice := reflect.MakeSlice(t, v.Len(), v.Cap()) - reflect.Copy(slice, v) - oldHeader := structPointer_GetSliceHeader(base, f) - oldHeader.Data = slice.Pointer() - oldHeader.Len = v.Len() - oldHeader.Cap = v.Cap() - default: - size := reflect.TypeOf(value).Elem().Size() - structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) - } -} - -func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - setPtrCustomType(base, p.field, custom) - return nil -} - -func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - if custom != nil { - setCustomType(base, p.field, custom) - } - return nil -} - -// Decode a slice of bytes ([]byte) into a slice of custom types. -func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - i := reflect.New(p.ctype.Elem()).Interface() - custom := (i).(Unmarshaler) - if err := custom.Unmarshal(b); err != nil { - return err - } - newBas := appendStructPointer(base, p.field, p.ctype) - - var zero field - setCustomType(newBas, zero, custom) - - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go index bd0e3bb..fe1bd7d 100644 --- a/vendor/github.com/gogo/protobuf/proto/discard.go +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -35,8 +35,14 @@ import ( "fmt" "reflect" "strings" + "sync" + "sync/atomic" ) +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -49,9 +55,202 @@ import ( // For proto2 messages, the unknown fields of message extensions are only // discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. discardLegacy(m) } +// DiscardUnknown recursively discards all unknown fields. 
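Editor's note (illustration only, not part of the diff): the discard.go hunk routes DiscardUnknown through a generated XXX_DiscardUnknown method when one exists and otherwise falls back to the legacy reflection path. A hedged sketch of the observable effect, assuming the generated type keeps unknown fields in XXX_unrecognized (as the gogo v1.1.1 types do); the two raw bytes below encode an arbitrary varint field with number 15 that StringValue does not define:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
)

func main() {
	// 0x78 = field number 15, wire type 0 (varint); 0x01 is its value.
	raw := []byte{0x78, 0x01}

	m := &types.StringValue{}
	if err := proto.Unmarshal(raw, m); err != nil {
		panic(err)
	}

	// The unknown field is retained, so it still contributes to the size.
	fmt.Println(proto.Size(m)) // 2

	// DiscardUnknown drops it (recursively, for nested messages too).
	proto.DiscardUnknown(m)
	fmt.Println(proto.Size(m)) // 0
}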
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + func discardLegacy(m Message) { v := reflect.ValueOf(m) if v.Kind() != reflect.Ptr || v.IsNil() { @@ -139,7 +338,7 @@ func discardLegacy(m Message) { // For proto2 messages, only discard unknown fields in message extensions // that have been accessed via GetExtension. - if em, ok := extendable(m); ok { + if em, err := extendable(m); err == nil { // Ignore lock since discardLegacy is not concurrency safe. emm, _ := em.extensionsRead() for _, mx := range emm { diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go index 18e2a5f..e748e17 100644 --- a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -47,157 +47,3 @@ func (*duration) String() string { return "duration" } func init() { RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") } - -func (o *Buffer) decDuration() (time.Duration, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return 0, err - } - dproto := &duration{} - if err := Unmarshal(b, dproto); err != nil { - return 0, err - } - return durationFromProto(dproto) -} - -func (o *Buffer) dec_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) - return nil -} - -func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) - var zero field - setPtrCustomType(newBas, zero, &d) - return nil -} - -func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { - d, err := o.decDuration() - if err != nil { - return err - } - structPointer_Word64Slice(base, p.field).Append(uint64(d)) - return nil -} - -func size_duration(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_duration(p *Properties, base structPointer) error { - structp := 
structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - dur := structPointer_Interface(structp, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_duration(p *Properties, base structPointer) (n int) { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - size := Size(d) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { - dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) - d := durationProto(*dur) - data, err := Marshal(d) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_slice_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return 0 - } - dproto := durationProto(*durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - if durs[i] == nil { - return errRepeatedHasNil - } - dproto := durationProto(*durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_duration(p *Properties, base structPointer) (n int) { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - size := Size(dproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { - pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) - durs := *pdurs - for i := 0; i < len(durs); i++ { - dproto := durationProto(durs[i]) - data, err := Marshal(dproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 8b84d1b..c27d35f 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -39,7 +39,6 @@ import ( "errors" "fmt" "reflect" - "sort" ) // RequiredNotSetError is the error returned if Marshal is called with @@ -82,10 +81,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. 
// This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +114,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +153,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +165,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +181,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +190,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +198,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. 
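Editor's note (illustration only, not part of the diff): encode.go replaces the shift-loop sizeVarint with a branch table in the exported SizeVarint; the contract is unchanged, so the reported size still matches the number of bytes EncodeVarint actually emits. A quick sketch, again assuming only the vendored gogo/protobuf proto package:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	for _, x := range []uint64{0, 127, 128, 1 << 21, 1<<63 + 1} {
		enc := proto.EncodeVarint(x)
		// SizeVarint and the encoded length must always agree.
		fmt.Printf("x=%d SizeVarint=%d len=%d\n", x, proto.SizeVarint(x), len(enc))
	}
}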
func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +219,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index 32111b7..0f5fb17 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -3,11 +3,6 @@ // Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// http://github.com/golang/protobuf/ -// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -18,9 +13,6 @@ // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT @@ -36,315 +28,6 @@ package proto -import ( - "reflect" -) - func NewRequiredNotSetError(field string) *RequiredNotSetError { return &RequiredNotSetError{field} } - -type Sizer interface { - Size() int -} - -func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, s...) 
- return nil -} - -func size_ext_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return 0 - } - n += len(s) - return -} - -// Encode a reference to bool pointer. -func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - x := 0 - if v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_bool(p *Properties, base structPointer) int { - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode a reference to int32 pointer. -func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_ref_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a reference to an int64 pointer. -func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_ref_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a reference to a string pointer. -func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_ref_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - n += len(p.tagcode) - n += sizeStringBytes(v) - return -} - -// Encode a reference to a message struct. -func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -//TODO this is only copied, please fix this -func size_ref_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetRefStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a slice of references to message struct pointers ([]struct). -func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { - var state errorState - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - } - return state.err -} - -//TODO this is only copied, please fix this -func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) - l := ss.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := ss.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += len(p.tagcode) - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return ErrNil - } - custom := i.(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_custom_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceRef(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { - custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - if data == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeRawBytes(data) - return nil -} - -func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { - n += len(p.tagcode) - i := structPointer_InterfaceAt(base, p.field, p.ctype) - if i == nil { - return 0 - } - custom := i.(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - return -} - -func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return ErrNil - } - slice := reflect.ValueOf(inter) - l := slice.Len() - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, err := custom.Marshal() - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { - inter := structPointer_InterfaceRef(base, p.field, p.ctype) - if inter == nil { - return 0 - } - slice := reflect.ValueOf(inter) - l := slice.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - v := slice.Index(i) - custom := v.Interface().(Marshaler) - data, _ := custom.Marshal() - n += sizeRawBytes(data) - } - return -} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go index 2ed1cf5..d4db5a1 100644 --- a/vendor/github.com/gogo/protobuf/proto/equal.go +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. 
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go index 0dfcb53..44ebd45 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -69,12 +70,6 @@ type extendableProtoV1 interface { ExtensionMap() map[int32]Extension } -type extensionsBytes interface { - Message - ExtensionRangeArray() []ExtensionRange - GetExtensions() *[]byte -} - // extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. type extensionAdapter struct { extendableProtoV1 @@ -97,14 +92,31 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. @@ -149,16 +161,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -type extensionRange interface { - Message - ExtensionRangeArray() []ExtensionRange -} - -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() -var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() -var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -198,8 +200,8 @@ func SetRawExtension(base Message, id int32, b []byte) { *ext = append(*ext, b...) return } - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -207,7 +209,7 @@ func SetRawExtension(base Message, id int32, b []byte) { } // isExtensionField returns true iff the given field number is in an extension range. 
-func isExtensionField(pb extensionRange, field int32) bool { +func isExtensionField(pb extendableProto, field int32) bool { for _, er := range pb.ExtensionRangeArray() { if er.Start <= field && field <= er.End { return true @@ -223,8 +225,11 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { if ea, ok := pbi.(extensionAdapter); ok { pbi = ea.extendableProtoV1 } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -269,80 +274,6 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { if epb, doki := pb.(extensionsBytes); doki { @@ -366,8 +297,8 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } // TODO: Check types, field numbers, etc.? 
- epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -375,46 +306,26 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } -func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { - ext := pb.GetExtensions() - for offset < len(*ext) { - tag, n1 := DecodeVarint((*ext)[offset:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - n2, err := size((*ext)[offset+n1:], wireType) - if err != nil { - panic(err) - } - newOffset := offset + n1 + n2 - if fieldNum == theFieldNum { - *ext = append((*ext)[:offset], (*ext)[newOffset:]...) - return offset - } - offset = newOffset - } - return -1 -} - // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { clearExtension(pb, extension.Field) } func clearExtension(pb Message, fieldNum int32) { - if epb, doki := pb.(extensionsBytes); doki { + if epb, ok := pb.(extensionsBytes); ok { offset := 0 for offset != -1 { offset = deleteExtension(epb, fieldNum, offset) } return } - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -422,39 +333,33 @@ func clearExtension(pb Message, fieldNum int32) { delete(extmap, fieldNum) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { if epb, doki := pb.(extensionsBytes); doki { ext := epb.GetExtensions() - o := 0 - for o < len(*ext) { - tag, n := DecodeVarint((*ext)[o:]) - fieldNum := int32(tag >> 3) - wireType := int(tag & 0x7) - l, err := size((*ext)[o+n:], wireType) - if err != nil { - return nil, err - } - if int32(fieldNum) == extension.Field { - v, err := decodeExtension((*ext)[o:o+n+l], extension) - if err != nil { - return nil, err - } - return v, nil - } - o += n + l - } - return defaultExtensionValue(extension) + return decodeExtensionFromBytes(extension, *ext) } - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") - } - if err := checkExtensionTypes(epb, extension); err != nil { + + epb, err := extendable(pb) + if err != nil { return nil, err } + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + emap, mu := epb.extensionsRead() if emap == nil { return defaultExtensionValue(extension) @@ -479,6 +384,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -496,6 +406,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -530,31 +445,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF } + b = b[n:] + wire := int(x) & 7 - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -564,9 +476,13 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. 
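The rewritten GetExtension above distinguishes type-complete descriptors (decoded Go value, declared default, or ErrMissingExtension) from descriptors whose ExtensionType is nil (the raw encoded bytes are returned). A hedged reading of that contract, again using placeholder generated names:

    package example

    import (
        "fmt"
        "log"

        "github.com/gogo/protobuf/proto"
        mypb "example.com/mypb" // hypothetical generated package
    )

    func readOption(msg *mypb.MyMessage) {
        v, err := proto.GetExtension(msg, mypb.E_MyOption)
        switch {
        case err == proto.ErrMissingExtension:
            log.Println("extension not set and no default declared")
        case err != nil:
            log.Println("type check or decode failed:", err)
        default:
            // Type-complete descriptor: v carries the declared Go type, *string here.
            fmt.Println(*(v.(*string)))
        }
        // If mypb.E_MyOption.ExtensionType were nil (an incomplete descriptor),
        // v would instead hold the raw encoded bytes of the field.
    }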
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } extensions = make([]interface{}, len(es)) for i, e := range es { - extensions[i], err = GetExtension(pb, e) + extensions[i], err = GetExtension(epb, e) if err == ErrMissingExtension { err = nil } @@ -581,9 +497,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -610,23 +526,18 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - if epb, doki := pb.(extensionsBytes); doki { - ClearExtension(pb, extension) - ext := epb.GetExtensions() - et := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - p := NewBuffer(nil) - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { + if epb, ok := pb.(extensionsBytes); ok { + newb, err := encodeExtension(extension, value) + if err != nil { return err } - *ext = append(*ext, p.buf...) + bb := epb.GetExtensions() + *bb = append(*bb, newb...) return nil } - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -656,8 +567,8 @@ func ClearAllExtensions(pb Message) { *ext = []byte{} return } - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go index ea6478f..53ebd8c 100644 --- a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -32,12 +32,36 @@ import ( "bytes" "errors" "fmt" + "io" "reflect" "sort" "strings" "sync" ) +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { if reflect.ValueOf(pb).IsNil() { return ifnotset @@ -56,19 +80,28 @@ func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool } func (this *Extension) Equal(that *Extension) bool { + if 
err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } return bytes.Equal(this.enc, that.enc) } func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } return bytes.Compare(this.enc, that.enc) } func SizeOfInternalExtension(m extendableProto) (n int) { - return SizeOfExtensionMap(m.extensionsWrite()) -} - -func SizeOfExtensionMap(m map[int32]Extension) (n int) { - return extensionsMapSize(m) + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) } type sortableMapElem struct { @@ -122,28 +155,26 @@ func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) } func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { - if err := encodeExtensionsMap(m); err != nil { - return 0, err - } - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - for _, k := range keys { - n += copy(data[n:], m[int32(k)].enc) + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n } - return n, nil + return o, nil } func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { - if m[id].value == nil || m[id].desc == nil { - return m[id].enc, nil - } - if err := encodeExtensionsMap(m); err != nil { + e := m[id] + if err := e.Encode(); err != nil { return nil, err } - return m[id].enc, nil + return e.enc, nil } func size(buf []byte, wire int) (int, error) { @@ -218,35 +249,58 @@ func AppendExtension(e Message, tag int32, buf []byte) { } } -func encodeExtension(e *Extension) error { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - return nil +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l } - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. 
- x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } } - e.enc = p.buf return nil } func (this Extension) GoString() string { - if this.enc == nil { - if err := encodeExtension(&this); err != nil { - panic(err) - } + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) } return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) } @@ -292,3 +346,23 @@ func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { pb := extendable.(extendableProto) return pb.extensionsWrite() } + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index c98d73d..0f1950c 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,6 +274,8 @@ import ( "sync" ) +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -309,16 +312,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. 
+// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */ @@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +909,13 @@ const GoGoProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. 
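Among the lib.go additions above is Buffer.SetDeterministic, which sorts map entries by key during encoding. A short usage sketch; the message type is a placeholder and only the Buffer calls shown in this diff are assumed:

    package example

    import (
        "log"

        "github.com/gogo/protobuf/proto"
        mypb "example.com/mypb" // hypothetical generated package
    )

    func marshalDeterministic(msg *mypb.MyMessage) []byte {
        buf := proto.NewBuffer(nil)
        // Deterministic output for a given binary; as documented above, it is
        // still not canonical across languages or builds.
        buf.SetDeterministic(true)
        if err := buf.Marshal(msg); err != nil {
            log.Fatal(err)
        }
        return buf.Bytes()
    }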
+type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go index 4b4f7c9..b3aa391 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -33,6 +33,14 @@ import ( "strconv" ) +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { s, ok := m[value] if !ok { diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go index fd982de..3b6ca41 100644 --- a/vendor/github.com/gogo/protobuf/proto/message_set.go +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -42,6 +42,7 @@ import ( "fmt" "reflect" "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { switch exts := exts.(type) { case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, } - m = exts + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + default: return nil, errors.New("proto: not an extension map") } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. 
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go index fb512e2..b6cad90 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. 
-func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. 
-func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. 
+func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. 
-// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. 
-type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } -// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. 
+func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go index 1763a5f..7ffd3c2 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -26,7 +26,11 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. 
package proto @@ -34,52 +38,22 @@ import ( "reflect" ) -func structPointer_FieldPointer(p structPointer, f field) structPointer { - panic("not implemented") -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - panic("not implemented") -} - -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - panic("not implemented") -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - panic("not implemented") -} +// TODO: untested, so probably incorrect. -func structPointer_Add(p structPointer, size field) structPointer { - panic("not implemented") +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} } -func structPointer_Len(p structPointer, f field) int { - panic("not implemented") -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - panic("not implemented") -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - panic("not implemented") -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - panic("not implemented") -} - -type structRefSlice struct{} - -func (v *structRefSlice) Len() int { - panic("not implemented") +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) } -func (v *structRefSlice) Index(i int) structPointer { - panic("not implemented") +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go index 6b5567d..d55a335 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. 
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. 
+func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. 
+// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index f156a29..b354101 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -26,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. 
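Editor's note: the unsafe implementation above replaces the old structPointer_* helpers with a single pointer type whose accessors all reduce to "base address plus byte offset". A minimal, self-contained sketch of that technique follows (an illustration only, not part of the vendored code; the example struct and names are hypothetical):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	ID   int32
	Name string
}

func main() {
	v := &example{ID: 7, Name: "gopher"}

	// A field is identified by its byte offset from the start of the struct,
	// as the comment on the field type in the diff above states.
	sf, _ := reflect.TypeOf(example{}).FieldByName("Name")

	// pointer.offset: base pointer + field offset, then reinterpret the result
	// through a concrete pointer type, in the spirit of the to*() accessors.
	fieldPtr := unsafe.Pointer(uintptr(unsafe.Pointer(v)) + sf.Offset)
	fmt.Println(*(*string)(fieldPtr)) // prints "gopher"
}

Building with -tags purego (or for appengine / js) selects the reflect-based files instead, which implement the same pointer interface without package unsafe.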
@@ -37,92 +37,20 @@ import ( "unsafe" ) -func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - return r.Interface() +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} } -func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { - point := unsafe.Pointer(uintptr(p) + uintptr(f)) - r := reflect.NewAt(t, point) - if r.Elem().IsNil() { - return nil - } - return r.Elem().Interface() +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) } -func copyUintPtr(oldptr, newptr uintptr, size int) { - oldbytes := make([]byte, 0) - oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) - oldslice.Data = oldptr - oldslice.Len = size - oldslice.Cap = size - newbytes := make([]byte, 0) - newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) - newslice.Data = newptr - newslice.Len = size - newslice.Cap = size - copy(newbytes, oldbytes) -} - -func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { - copyUintPtr(uintptr(oldptr), uintptr(newptr), size) -} - -func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { - size := typ.Elem().Size() - - oldHeader := structPointer_GetSliceHeader(base, f) - oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() - newLen := oldHeader.Len + 1 - newSlice := reflect.MakeSlice(typ, newLen, newLen) - reflect.Copy(newSlice, oldSlice) - bas := toStructPointer(newSlice) - oldHeader.Data = uintptr(bas) - oldHeader.Len = newLen - oldHeader.Cap = newLen - - return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) -} - -func structPointer_FieldPointer(p structPointer, f field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { - return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) -} - -func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { - return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} - -func structPointer_Add(p structPointer, size field) structPointer { - return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) -} - -func structPointer_Len(p structPointer, f field) int { - return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) -} - -func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { - return &structRefSlice{p: p, f: f, size: size} -} - -// A structRefSlice represents a slice of structs (themselves submessages or groups). 
-type structRefSlice struct { - p structPointer - f field - size uintptr -} - -func (v *structRefSlice) Len() int { - return structPointer_Len(v.p, v.f) -} - -func (v *structRefSlice) Index(i int) structPointer { - ss := structPointer_GetStructPointer(v.p, v.f) - ss1 := structPointer_GetRefStructPointer(ss, 0) - return structPointer_Add(ss1, field(uintptr(i)*v.size)) +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice } diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 2a69e88..7a5e28e 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -63,42 +63,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. -type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -145,13 +109,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. 
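Editor's note: with the hand-written encoder/decoder/sizer plumbing removed, StructProperties and Properties are reduced to tag metadata consumed by the new table-driven marshaler. A small sketch of reading that metadata (an illustration only, assuming the exported GetProperties, StructProperties.Prop, and Properties fields of gogo/protobuf's proto package; the item type is hypothetical):

package main

import (
	"fmt"
	"reflect"

	"github.com/gogo/protobuf/proto"
)

// item is a hand-written stand-in for a generated message struct.
type item struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
	ID   *int32  `protobuf:"varint,2,opt,name=id"`
}

func main() {
	sp := proto.GetProperties(reflect.TypeOf(item{}))
	for _, p := range sp.Prop {
		// OrigName, Wire, and Tag come straight from the struct tag.
		fmt.Println(p.OrigName, p.Wire, p.Tag)
	}
}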
@@ -197,36 +154,19 @@ type Properties struct { StdTime bool StdDuration bool - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sstype reflect.Type // set for slices of structs types only - ctype reflect.Type // set for custom types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -272,29 +212,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -309,6 +234,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -336,7 +262,7 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } case strings.HasPrefix(f, "embedded="): p.OrigName = strings.Split(f, "=")[1] @@ -352,292 +278,43 @@ func (p *Properties) Parse(s string) { } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. -func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { isMap := typ.Kind() == reflect.Map if len(p.CustomType) > 0 && !isMap { - p.setCustomEncAndDec(typ) + p.ctype = typ p.setTag(lockGetProp) return } if p.StdTime && !isMap { - p.setTimeEncAndDec(typ) p.setTag(lockGetProp) return } if p.StdDuration && !isMap { - p.setDurationEncAndDec(typ) p.setTag(lockGetProp) return } switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - } else { - p.enc = (*Buffer).enc_ref_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_ref_bool - } - case reflect.Int32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - } else { - p.enc = (*Buffer).enc_ref_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_int32 - } - case reflect.Uint32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_ref_uint32 - } - case reflect.Int64, reflect.Uint64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.Float32: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - } else { - p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_ref_uint32 - } - case reflect.Float64: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - } else { - p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_ref_int64 - } - case reflect.String: - if p.proto3 { - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - } else { - p.enc = (*Buffer).enc_ref_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_ref_string - } case reflect.Struct: p.stype = typ - p.isMarshaler = isMarshaler(typ) - p.isUnmarshaler = isUnmarshaler(typ) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_ref_struct_message - p.dec = (*Buffer).dec_ref_struct_message - p.size = size_ref_struct_message - } else { - fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) - } - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size 
= size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } - case reflect.Slice: switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string case reflect.Ptr: switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case 
reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte + p.stype = t3 } case reflect.Struct: - p.setSliceOfNonPointerStructs(t1) + p.stype = t2 } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map p.mtype = t1 p.mkeyprop = &Properties{} @@ -659,20 +336,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } func (p *Properties) setTag(lockGetProp bool) { - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -683,20 +346,9 @@ func (p *Properties) setTag(lockGetProp bool) { } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -706,14 +358,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -763,10 +412,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) || - reflect.PtrTo(t).Implements(extendableBytesType) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -777,23 +422,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - if len(f.Tag.Get("protobuf")) > 0 { - p.enc = (*Buffer).enc_ext_slice_byte - p.dec = nil // not needed - p.size = size_ext_slice_byte - } else { - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { isOneofMessage = true @@ -809,9 +437,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -822,8 +447,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -873,30 +497,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. -func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -925,20 +525,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). 
var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -954,7 +576,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go index b6b7176..40ea3dd 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -1,6 +1,6 @@ // Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -29,83 +29,8 @@ package proto import ( - "fmt" - "os" "reflect" ) -func (p *Properties) setCustomEncAndDec(typ reflect.Type) { - p.ctype = typ - if p.Repeated { - p.enc = (*Buffer).enc_custom_slice_bytes - p.dec = (*Buffer).dec_custom_slice_bytes - p.size = size_custom_slice_bytes - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_custom_bytes - p.dec = (*Buffer).dec_custom_bytes - p.size = size_custom_bytes - } else { - p.enc = (*Buffer).enc_custom_ref_bytes - p.dec = (*Buffer).dec_custom_ref_bytes - p.size = size_custom_ref_bytes - } -} - -func (p *Properties) setDurationEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_duration - p.dec = (*Buffer).dec_slice_duration - p.size = size_slice_duration - } else { - p.enc = (*Buffer).enc_slice_ref_duration - p.dec = (*Buffer).dec_slice_ref_duration - p.size = size_slice_ref_duration - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_duration - p.dec = (*Buffer).dec_duration - p.size = size_duration - } else { - p.enc = (*Buffer).enc_ref_duration - p.dec = (*Buffer).dec_ref_duration - p.size = size_ref_duration - } -} - -func (p *Properties) setTimeEncAndDec(typ reflect.Type) { - if p.Repeated { - if typ.Elem().Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_slice_time - p.dec = (*Buffer).dec_slice_time - p.size = size_slice_time - } else { - p.enc = (*Buffer).enc_slice_ref_time - p.dec = (*Buffer).dec_slice_ref_time - p.size = size_slice_ref_time - } - } else if typ.Kind() == reflect.Ptr { - p.enc = (*Buffer).enc_time - p.dec = (*Buffer).dec_time - p.size = size_time - } else { - p.enc = (*Buffer).enc_ref_time - p.dec = (*Buffer).dec_ref_time - p.size = size_ref_time - } - -} - -func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { - t2 := typ.Elem() - p.sstype = typ - p.stype = t2 - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - p.enc = (*Buffer).enc_slice_ref_struct_message - p.dec = (*Buffer).dec_slice_ref_struct_message - p.size = size_slice_ref_struct_message - if p.Wire != "bytes" { - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) - } -} +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 0000000..255e7b5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,2799 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). 
+func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + if deterministic { + return nil, errors.New("proto: deterministic not supported by the Marshal method of " + u.typ.String()) + } + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errreq error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required && errreq == nil { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. 
+ errreq = &RequiredNotSetError{f.name} + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errreq +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. 
+ sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. 
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. +func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + } + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + 
return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, 
appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} 
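
The fixed32 helpers above all reduce to the same arithmetic: a packed repeated fixed32 field costs 4 bytes per element, plus a varint holding the payload byte count, plus the field's tag. The short standalone sketch below (not part of the vendored file; sizeVarint, appendVarint, and appendFixed32 are simplified local stand-ins for the package's unexported helpers, and fieldNum is an arbitrary example value) reproduces that size computation and the corresponding wire encoding for a small slice.

package main

import "fmt"

// sizeVarint returns how many bytes the varint encoding of x occupies.
func sizeVarint(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

// appendVarint appends the varint encoding of v to b.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// appendFixed32 appends v in little-endian order, as the fixed32 wire format requires.
func appendFixed32(b []byte, v uint32) []byte {
	return append(b, byte(v), byte(v>>8), byte(v>>16), byte(v>>24))
}

func main() {
	const (
		fieldNum  = 5 // hypothetical field number, for illustration only
		wireBytes = 2 // packed repeated scalars use the length-delimited wire type
	)
	s := []uint32{1, 2, 3}

	// The key (tag) is the field number shifted left by three bits, OR'd with the wire type.
	wiretag := uint64(fieldNum)<<3 | wireBytes
	tagsize := sizeVarint(wiretag)

	// Size, following the same formula as sizeFixed32PackedSlice:
	// payload bytes + varint length prefix + tag.
	size := 4*len(s) + sizeVarint(uint64(4*len(s))) + tagsize

	// Encoding, following appendFixed32PackedSlice: tag, byte count, then the raw values.
	b := appendVarint(nil, wiretag)
	b = appendVarint(b, uint64(4*len(s)))
	for _, v := range s {
		b = appendFixed32(b, v)
	}

	fmt.Printf("computed size %d, encoded %d bytes: % x\n", size, len(b), b)
}

For three elements under field number 5 this prints a size of 14 and a 14-byte encoding (1 tag byte, 1 length byte, 12 payload bytes), which matches what the table-driven marshaler computes once per field and then reuses through the sizer and marshaler function pointers stored in marshalFieldInfo.
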
+func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 
0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func 
sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. 
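// Illustrative sketch (editorial, not from the upstream gogo/protobuf sources):
// how the packed appenders above lay out a repeated fixed32 field, assuming
// field number 4. The packed form writes a single key with wire type WireBytes,
// a byte count, then the raw payload; the non-packed appenders instead repeat
// the key before every element.
//
//	key := uint64(4)<<3 | WireBytes     // 0x22
//	b := appendVarint(nil, key)
//	b = appendVarint(b, uint64(4*2))    // 8 payload bytes for two fixed32 values
//	for _, v := range []uint32{1, 2} {
//		b = appendFixed32(b, v)     // 4 bytes each, little-endian
//	}
//	// b == []byte{0x22, 0x08, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00}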
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. +func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. 
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. 
+ t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). 
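// Illustrative sketch (editorial, not from the upstream sources): the byte
// layout produced by appendMessageSet below for a single extension with
// type_id 100 and a 3-byte encoded message, using the usual protobuf wire-type
// values (WireStartGroup=3, WireVarint=0, WireBytes=2, WireEndGroup=4).
//
//	0x0b              // 1<<3|WireStartGroup: open the Item group
//	0x10, 0x64        // 2<<3|WireVarint, then type_id = 100
//	0x1a, 0x03        // 3<<3|WireBytes, then message length = 3
//	0xaa, 0xbb, 0xcc  // the embedded message bytes
//	0x0c              // 1<<3|WireEndGroup: close the Item group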
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 0000000..997f57c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
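// Illustrative usage sketch (editorial, not from the upstream sources) for the
// table_marshal.go entry points above; pb.MyMessage is a hypothetical
// generated message type used only for this example.
//
//	msg := &pb.MyMessage{}
//	n := proto.Size(msg)             // encoded size, computed from the marshal tables
//	data, err := proto.Marshal(msg)  // one-shot marshal into a fresh slice
//	if err != nil {
//		// handle err
//	}
//	var buf proto.Buffer             // reusable-buffer variant
//	err = buf.Marshal(msg)           // appends the encoding to buf's internal slice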
+ +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 0000000..f520106 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. 
+func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000..910e2dd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2048 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
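As an aside on the caching pattern the comments above describe (an atomic load on the fast path, a locked slow path that computes per-type metadata once), here is a rough, self-contained sketch. `typeInfo` and `infoCache` are invented names for illustration; the real table-driven code keeps far more state than this.

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
)

// typeInfo stands in for the per-type unmarshal metadata.
type typeInfo struct {
	typ       reflect.Type
	numFields int
}

// infoCache plays the role of the cached slot: readers take an atomic load on
// the fast path and fall back to a mutex-guarded computation only once.
type infoCache struct {
	val atomic.Value // holds *typeInfo once computed
	mu  sync.Mutex
}

func (c *infoCache) get(t reflect.Type) *typeInfo {
	if ti, _ := c.val.Load().(*typeInfo); ti != nil {
		return ti // fast path: already published
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if ti, _ := c.val.Load().(*typeInfo); ti != nil {
		return ti // another goroutine finished first
	}
	ti := &typeInfo{typ: t, numFields: t.NumField()}
	c.val.Store(ti) // publish for future fast-path readers
	return ti
}

func main() {
	var c infoCache
	type msg struct{ A, B string }
	fmt.Println(c.get(reflect.TypeOf(msg{})).numFields) // 2
}
```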
+ err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + rnse = r + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if rnse != nil { + // A required field of a submessage/group is missing. Return that error. + return rnse + } + if reqMask != u.reqMask { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. 
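The dispatch loop above splits each decoded varint into a tag number (`x >> 3`) and a wire type (`x & 7`). A minimal standalone sketch of that split, using a hypothetical `decodeUvarint` helper rather than the package's own decoder:

```go
package main

import "fmt"

// decodeUvarint reads a base-128 varint from b and returns the value plus the
// number of bytes consumed (0 on truncated or overlong input).
func decodeUvarint(b []byte) (uint64, int) {
	var x uint64
	var shift uint
	for i, c := range b {
		if i == 10 {
			return 0, 0 // would overflow uint64
		}
		if c < 0x80 {
			return x | uint64(c)<<shift, i + 1
		}
		x |= uint64(c&0x7f) << shift
		shift += 7
	}
	return 0, 0
}

func main() {
	// 0x08 0x96 0x01 is field 1, wire type 0 (varint), value 150 — the
	// canonical protobuf example payload.
	b := []byte{0x08, 0x96, 0x01}

	key, n := decodeUvarint(b)
	tag, wire := key>>3, key&7
	fmt.Println(tag, wire) // 1 0

	val, _ := decodeUvarint(b[n:])
	fmt.Println(val) // 150
}
```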
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if fn.IsValid() && len(oneofFields) > 0 { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1] + tag, err := strconv.Atoi(tagstr) + if err != nil { + panic("protobuf tag field not an integer: " + tagstr) + } + + // Find the oneof field that this struct implements. 
+ // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(tag, of.field, unmarshal, 0) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0) + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + } + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + // We'll never have both pointer and slice for basic types. 
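typeUnmarshaler above (like computeUnmarshalInfo earlier) drives everything off the generated `protobuf:"..."` struct tag: the first element is the encoding, the second the field number, and `name=` / `stdtime`-style options follow from index 3 onward. A hedged sketch of that parsing; the `fieldTag` struct and helper name are invented for illustration, not part of the vendored API.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// fieldTag holds the pieces pulled out of a generated protobuf struct tag.
type fieldTag struct {
	encoding string // "varint", "bytes", "fixed64", "zigzag32", ...
	num      int    // field number
	name     string // value of the name= option
	stdTime  bool   // stdtime option present
}

func parseProtobufTag(tag string) (fieldTag, error) {
	parts := strings.Split(tag, ",")
	if len(parts) < 3 {
		return fieldTag{}, fmt.Errorf("tag has too few fields: %q", tag)
	}
	num, err := strconv.Atoi(parts[1])
	if err != nil {
		return fieldTag{}, fmt.Errorf("field number not an integer: %q", parts[1])
	}
	ft := fieldTag{encoding: parts[0], num: num}
	for _, opt := range parts[3:] { // options start after "wiretype,num,opt|req|rep"
		switch {
		case strings.HasPrefix(opt, "name="):
			ft.name = opt[len("name="):]
		case opt == "stdtime":
			ft.stdTime = true
		}
	}
	return ft, nil
}

func main() {
	ft, _ := parseProtobufTag("bytes,2,opt,name=created_at,proto3,stdtime")
	fmt.Printf("%+v\n", ft) // {encoding:bytes num:2 name:created_at stdTime:true}
}
```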
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) 
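The `append(emptyBuf[:], b[:x]...)` pattern used just above is worth a standalone check: it copies the payload into fresh storage and yields a non-nil slice even for zero-length input, which is exactly what the accompanying comment asks for. A tiny illustration:

```go
package main

import "fmt"

func main() {
	var emptyBuf [0]byte

	// Copying via append: the destination has capacity 0, so append must
	// allocate, and the result shares no memory with the source.
	src := []byte("abc")
	cp := append(emptyBuf[:], src...)
	src[0] = 'X'
	fmt.Println(string(cp)) // "abc": cp is an independent copy

	// And the result stays non-nil even when the payload is empty.
	empty := append(emptyBuf[:], []byte{}...)
	fmt.Println(empty == nil, len(empty)) // false 0
}
```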
+ s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. 
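As the comment above notes, each map entry travels as a small length-delimited submessage whose key is field 1 and whose value is field 2. A hedged sketch that hand-encodes one entry of a hypothetical `map<string, int32>` field (field number 4 is made up, and the helpers are not part of the vendored package):

```go
package main

import "fmt"

// appendVarint appends x as a base-128 varint.
func appendVarint(b []byte, x uint64) []byte {
	for x >= 0x80 {
		b = append(b, byte(x)|0x80)
		x >>= 7
	}
	return append(b, byte(x))
}

// appendTag appends a field key for the given field number and wire type.
func appendTag(b []byte, field, wire uint64) []byte {
	return appendVarint(b, field<<3|wire)
}

func main() {
	const (
		wireVarint = 0
		wireBytes  = 2
	)

	// One entry of map<string, int32>{"hi": 3}: the entry is itself a tiny
	// message with key = field 1 and value = field 2, which is what the map
	// decoder above switches on.
	var entry []byte
	entry = appendTag(entry, 1, wireBytes)
	entry = appendVarint(entry, uint64(len("hi")))
	entry = append(entry, "hi"...)
	entry = appendTag(entry, 2, wireVarint)
	entry = appendVarint(entry, 3)

	// The entry is wrapped as a length-delimited submessage on the map field.
	var msg []byte
	msg = appendTag(msg, 4, wireBytes)
	msg = appendVarint(msg, uint64(len(entry)))
	msg = append(msg, entry...)

	fmt.Printf("% x\n", msg) // 22 06 0a 02 68 69 10 03
}
```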
+ if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. 
+// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. +func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 0000000..00d6c7a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := 
reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := ×tamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go index f609d1d..4f5706d 100644 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ 
b/vendor/github.com/gogo/protobuf/proto/text.go @@ -57,7 +57,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -177,11 +176,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. for _, ch := range u { @@ -276,6 +270,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -447,12 +445,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } if len(props.Enum) > 0 { if err := tm.writeEnum(w, fv, props); err != nil { @@ -475,7 +467,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { pv = reflect.New(sv.Type()) pv.Elem().Set(sv) } - if pv.Type().Implements(extensionRangeType) { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -484,27 +476,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -605,6 +576,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -613,8 +597,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index f127672..fbb000d 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -212,7 +212,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -283,60 +282,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. 
func (p *textParser) back() { p.backed = true } @@ -734,6 +720,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -983,7 +972,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -1001,13 +990,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go index d427647..38439fa 100644 --- a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -47,183 +47,3 @@ func (*timestamp) String() string { return "timestamp" } func init() { RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") } - -func (o *Buffer) decTimestamp() (time.Time, error) { - b, err := o.DecodeRawBytes(true) - if err != nil { - return time.Time{}, err - } - tproto := &timestamp{} - if err := Unmarshal(b, tproto); err != nil { - return time.Time{}, err - } - return timestampFromProto(tproto) -} - -func (o *Buffer) dec_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - setPtrCustomType(base, p.field, &t) - return nil -} - -func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - setCustomType(base, p.field, &t) - return nil -} - -func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) - var zero field - setPtrCustomType(newBas, zero, &t) - return nil -} - -func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { - t, err := o.decTimestamp() - if err != nil { - return err - } - newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) - var zero field - setCustomType(newBas, zero, &t) - return nil -} - -func size_time(p *Properties, base structPointer) (n int) { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - tim := structPointer_Interface(structp, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_time(p *Properties, base structPointer) error { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - tim := structPointer_Interface(structp,
timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_ref_time(p *Properties, base structPointer) (n int) { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return 0 - } - size := Size(t) - return size + sizeVarint(uint64(size)) + len(p.tagcode) -} - -func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { - tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) - t, err := timestampProto(*tim) - if err != nil { - return err - } - data, err := Marshal(t) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return nil -} - -func size_slice_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return 0 - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - if tims[i] == nil { - return errRepeatedHasNil - } - tproto, err := timestampProto(*tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} - -func size_slice_ref_time(p *Properties, base structPointer) (n int) { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return 0 - } - size := Size(tproto) - n += len(p.tagcode) + size + sizeVarint(uint64(size)) - } - return n -} - -func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { - ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) - tims := *ptims - for i := 0; i < len(tims); i++ { - tproto, err := timestampProto(tims[i]) - if err != nil { - return err - } - data, err := Marshal(tproto) - if err != nil { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - } - return nil -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go index 4174cbd..44f893b 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -1,35 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: descriptor.proto -/* -Package descriptor is a generated protocol buffer package. 
- -It is generated from these files: - descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ package descriptor import proto "github.com/gogo/protobuf/proto" @@ -139,7 +110,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{4, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 0} } type FieldDescriptorProto_Label int32 @@ -179,7 +150,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{4, 1} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4, 1} } // Generated classes can be optimized for speed or code size. @@ -220,7 +191,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { return nil } func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{10, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10, 0} } type FieldOptions_CType int32 @@ -260,7 +231,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{12, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 0} } type FieldOptions_JSType int32 @@ -302,7 +273,7 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{12, 1} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12, 1} } // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, @@ -344,20 +315,41 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { return nil } func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{17, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. 
type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { if m != nil { @@ -390,14 +382,35 @@ type FileDescriptorProto struct { SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. // The supported values are "proto2" and "proto3". 
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} } +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo func (m *FileDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -496,14 +509,35 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} } +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo func (m *DescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -576,19 +610,38 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{2, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) 
XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) } +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + func (m *DescriptorProto_ExtensionRange) GetStart() int32 { if m != nil && m.Start != nil { return *m.Start @@ -614,18 +667,37 @@ func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{2, 1} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) } +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + func (m *DescriptorProto_ReservedRange) GetStart() int32 { if m != nil && m.Start != nil { return *m.Start @@ -643,14 +715,18 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{3} +} var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -659,6 +735,23 @@ var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ExtensionRangeOptions } +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -697,15 +790,36 @@ type FieldDescriptorProto struct { // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) } -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -779,15 +893,36 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. 
type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) } -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -814,14 +949,35 @@ type EnumDescriptorProto struct { ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved enum value names, which may not be reused. A given name may only // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{6} } +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -865,17 +1021,36 @@ func (m *EnumDescriptorProto) GetReservedName() []string { // is inclusive such that it can appropriately represent the entire int32 // domain. 
type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{6, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) } +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -893,19 +1068,38 @@ func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { // Describes a value within an enum. 
type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{7} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) } +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { return *m.Name @@ -929,16 +1123,37 @@ func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. 
type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) } -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -972,14 +1187,35 @@ type MethodDescriptorProto struct { // Identifies if client streams multiple client messages ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_unrecognized []byte `json:"-"` + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{9} 
+} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -1046,7 +1282,7 @@ type FileOptions struct { // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. @@ -1100,16 +1336,21 @@ type FileOptions struct { // is empty. When this option is empty, the package name will be used for // determining the namespace. PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // The parser stores options it doesn't recognize here. See above. + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{10} +} var extRange_FileOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1118,6 +1359,23 @@ var extRange_FileOptions = []proto.ExtensionRange{ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FileOptions } +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo const Default_FileOptions_JavaMultipleFiles bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false @@ -1150,6 +1408,7 @@ func (m *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } +// Deprecated: Do not use. func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash @@ -1315,14 +1574,18 @@ type MessageOptions struct { MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{11} +} var extRange_MessageOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1331,6 +1594,23 @@ var extRange_MessageOptions = []proto.ExtensionRange{ func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MessageOptions } +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (dst *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(dst, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo const Default_MessageOptions_MessageSetWireFormat bool = false const Default_MessageOptions_NoStandardDescriptorAccessor bool = false @@ -1433,14 +1713,18 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{12} +} var extRange_FieldOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1449,6 +1733,23 @@ var extRange_FieldOptions = []proto.ExtensionRange{ func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FieldOptions } +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL @@ -1508,14 +1809,18 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{13} +} var extRange_OneofOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1524,6 +1829,23 @@ var extRange_OneofOptions = []proto.ExtensionRange{ func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OneofOptions } +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -1543,14 +1865,18 @@ type EnumOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{14} +} var extRange_EnumOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1559,6 +1885,23 @@ var extRange_EnumOptions = []proto.ExtensionRange{ func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumOptions } +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo const Default_EnumOptions_Deprecated bool = false @@ -1591,14 +1934,18 @@ type EnumValueOptions struct { Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{15} +} var extRange_EnumValueOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1607,6 +1954,23 @@ var extRange_EnumValueOptions = []proto.ExtensionRange{ func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumValueOptions } +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo const Default_EnumValueOptions_Deprecated bool = false @@ -1632,14 +1996,18 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{16} +} var extRange_ServiceOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1648,6 +2016,23 @@ var extRange_ServiceOptions = []proto.ExtensionRange{ func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ServiceOptions } +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo const Default_ServiceOptions_Deprecated bool = false @@ -1674,14 +2059,18 @@ type MethodOptions struct { IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{17} +} var extRange_MethodOptions = []proto.ExtensionRange{ {Start: 1000, End: 536870911}, @@ -1690,6 +2079,23 @@ var extRange_MethodOptions = []proto.ExtensionRange{ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MethodOptions } +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (dst *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(dst, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo const Default_MethodOptions_Deprecated bool = false const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN @@ -1725,19 +2131,40 @@ type UninterpretedOption struct { Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1794,18 +2221,37 @@ func (m *UninterpretedOption) GetAggregateValue() string { // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
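The comment above already spells out how "foo.(bar.baz).qux" is split into name parts; the sketch below is an illustrative, non-vendored Go rendering of that same example with the generated types.

    package main

    import (
        "fmt"

        proto "github.com/gogo/protobuf/proto"
        descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
    )

    func main() {
        // "foo.(bar.baz).qux" -> three name parts; only "bar.baz" is an extension.
        opt := &descriptor.UninterpretedOption{
            Name: []*descriptor.UninterpretedOption_NamePart{
                {NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
                {NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
                {NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
            },
            // Exactly one of the *_value fields should be set.
            IdentifierValue: proto.String("EXAMPLE"),
        }
        fmt.Println(opt.String())
    }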
type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{18, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) } +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + func (m *UninterpretedOption_NamePart) GetNamePart() string { if m != nil && m.NamePart != nil { return *m.NamePart @@ -1866,14 +2312,35 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. 
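A minimal, illustrative (non-vendored) sketch of how a single location entry is assembled with these types; the path values are only an example.

    package main

    import (
        "fmt"

        proto "github.com/gogo/protobuf/proto"
        descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
    )

    func main() {
        // Path is a sequence of field numbers and indexes into the original
        // FileDescriptorProto; the comment text travels alongside it.
        loc := &descriptor.SourceCodeInfo_Location{
            Path:            []int32{4, 0, 2, 1}, // illustrative: message 0, field 1
            LeadingComments: proto.String(" A comment attached to that field.\n"),
        }
        info := &descriptor.SourceCodeInfo{
            Location: []*descriptor.SourceCodeInfo_Location{loc},
        }
        fmt.Println(len(info.GetLocation()), loc.GetPath())
    }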
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(dst, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1963,15 +2430,34 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{19, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) } +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -2014,14 +2500,35 @@ func (m *SourceCodeInfo_Location) 
GetLeadingDetachedComments() []string { type GeneratedCodeInfo struct { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_unrecognized []byte `json:"-"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) } -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{20} } +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -2042,16 +2549,35 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{20, 0} + return fileDescriptor_descriptor_9588782fb9cbecd6, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) } +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { if m != nil { @@ -2117,9 +2643,9 @@ func init() { proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } -func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_descriptor_9588782fb9cbecd6) } -var fileDescriptorDescriptor = []byte{ +var fileDescriptor_descriptor_9588782fb9cbecd6 = []byte{ // 2487 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xde, 0x8f, 0x38, 0xda, 0x8f, diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go index 3b95a77..ec6eb16 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -1,43 +1,15 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: descriptor.proto -/* -Package descriptor is a generated protocol buffer package. 
- -It is generated from these files: - descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ package descriptor import fmt "fmt" import strings "strings" -import proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" import sort "sort" import strconv "strconv" import reflect "reflect" +import proto "github.com/gogo/protobuf/proto" import math "math" // Reference imports to suppress errors if they are not otherwise used. @@ -752,8 +724,8 @@ func valueToGoStringDescriptor(v interface{}, typ string) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -func extensionToGoStringDescriptor(m proto.Message) string { - e := proto.GetUnsafeExtensionsMap(m) +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) if e == nil { return "nil" } diff --git a/vendor/github.com/gogo/protobuf/types/any.go b/vendor/github.com/gogo/protobuf/types/any.go index d83c3ad..df4787d 100644 --- a/vendor/github.com/gogo/protobuf/types/any.go +++ b/vendor/github.com/gogo/protobuf/types/any.go @@ -129,10 +129,12 @@ func UnmarshalAny(any *Any, pb proto.Message) error { // Is returns true if any value contains a given message type. func Is(any *Any, pb proto.Message) bool { - aname, err := AnyMessageName(any) - if err != nil { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { return false } - - return aname == proto.MessageName(pb) + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name } diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go index 4b5f570..b991823 100644 --- a/vendor/github.com/gogo/protobuf/types/any.pb.go +++ b/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -1,15 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: any.proto +// source: google/protobuf/any.proto -/* - Package types is a generated protocol buffer package. - - It is generated from these files: - any.proto - - It has these top-level messages: - Any -*/ package types import proto "github.com/gogo/protobuf/proto" @@ -115,17 +106,18 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // } // type Any struct { - // A URL/resource name whose content describes the type of the - // serialized protocol buffer message. + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). 
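For context on the Is/UnmarshalAny change above, a minimal usage sketch of the Any helpers (assumed typical usage of the gogo types package; not part of the vendored code):

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/types"
    )

    func main() {
        // Wrap a message in an Any; MarshalAny fills TypeUrl with a
        // "type.googleapis.com/<full name>" style URL.
        any, err := types.MarshalAny(&types.Empty{})
        if err != nil {
            panic(err)
        }
        // Is now compares the TypeUrl suffix directly instead of going
        // through AnyMessageName first.
        fmt.Println(types.Is(any, &types.Empty{})) // true

        var out types.Empty
        if err := types.UnmarshalAny(any, &out); err != nil {
            panic(err)
        }
    }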
The name should be in a canonical form + // (e.g., leading "." is not accepted). // - // For URLs which use the scheme `http`, `https`, or no scheme, the - // following restrictions and interpretations apply: + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: // // * If no scheme is provided, `https` is assumed. - // * The last segment of the URL's path must represent the fully - // qualified name of the type (as in `path/google.protobuf.Duration`). - // The name should be in a canonical form (e.g., leading "." is - // not accepted). // * An HTTP GET on the URL must yield a [google.protobuf.Type][] // value in binary format, or produce an error. // * Applications are allowed to cache lookup results based on the @@ -134,18 +126,53 @@ type Any struct { // on changes to types. (Use versioned type names to manage // breaking changes.) // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_8eec716d227a06dd, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return m.Size() +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) } -func (m *Any) Reset() { *m = Any{} } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptorAny, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +var xxx_messageInfo_Any proto.InternalMessageInfo func (m *Any) GetTypeUrl() string { if m != nil { @@ -161,6 +188,9 @@ func (m *Any) GetValue() []byte { return nil } +func (*Any) XXX_MessageName() string { + return "google.protobuf.Any" +} func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } @@ -198,6 +228,9 @@ func (this *Any) Compare(that interface{}) int { if c := bytes.Compare(this.Value, that1.Value); c != 0 { return c } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *Any) Equal(that interface{}) bool { @@ -225,6 +258,9 @@ func (this *Any) Equal(that interface{}) bool { if !bytes.Equal(this.Value, 
that1.Value) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Any) GoString() string { @@ -235,6 +271,9 @@ func (this *Any) GoString() string { s = append(s, "&types.Any{") s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -273,6 +312,9 @@ func (m *Any) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -294,6 +336,7 @@ func NewPopulatedAny(r randyAny, easy bool) *Any { this.Value[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedAny(r, 3) } return this } @@ -381,6 +424,9 @@ func (m *Any) Size() (n int) { if l > 0 { n += 1 + l + sovAny(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -404,6 +450,7 @@ func (this *Any) String() string { s := strings.Join([]string{`&Any{`, `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -517,6 +564,7 @@ func (m *Any) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -631,21 +679,22 @@ var ( ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("any.proto", fileDescriptorAny) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_8eec716d227a06dd) } -var fileDescriptorAny = []byte{ - // 204 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xcc, 0xab, 0xd4, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, - 0x4a, 0xd3, 0x94, 0xcc, 0xb8, 0x98, 0x1d, 0xf3, 0x2a, 0x85, 0x24, 0xb9, 0x38, 0x4a, 0x2a, 0x0b, - 0x52, 0xe3, 0x4b, 0x8b, 0x72, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xd8, 0x41, 0xfc, 0xd0, - 0xa2, 0x1c, 0x21, 0x11, 0x2e, 0xd6, 0xb2, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, - 0x9e, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, - 0x94, 0x63, 0xfc, 0xf1, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, - 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, - 0x1f, 0x40, 0xe2, 0x8f, 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x77, 0xe2, - 0x70, 0xcc, 0xab, 0x0c, 0x00, 0x71, 0x02, 0x18, 0xa3, 0x58, 0x41, 0x36, 0x16, 0x2f, 0x62, 0x62, - 0x76, 0x0f, 0x70, 0x5a, 0xc5, 0x24, 0xe7, 0x0e, 0x51, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, - 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, 0x17, 0x02, 0x52, 0x96, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, - 0x00, 0x00, 0xff, 0xff, 0xb7, 0x39, 0x2f, 0x89, 0xdd, 0x00, 0x00, 0x00, +var fileDescriptor_any_8eec716d227a06dd = []byte{ + // 216 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 
0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0x6a, 0x66, 0xbc, 0xf0, 0x50, + 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x7f, 0x3c, 0x94, 0x63, 0x6c, 0x78, + 0x24, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, + 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, 0xc3, 0x07, 0x90, 0xf8, 0x63, 0x39, 0xc6, 0x13, 0x8f, + 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xdc, 0xe0, 0xc4, 0xe1, 0x98, 0x57, 0x19, + 0x00, 0xe2, 0x04, 0x30, 0x46, 0xb1, 0x82, 0xac, 0x2d, 0x5e, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, + 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x34, 0x00, 0xaa, 0x54, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, + 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x2c, 0x89, 0x0d, 0x6c, 0x86, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, + 0x19, 0x7c, 0x7c, 0x94, 0xf2, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/types/api.pb.go b/vendor/github.com/gogo/protobuf/types/api.pb.go new file mode 100644 index 0000000..395c9b6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/api.pb.go @@ -0,0 +1,2058 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/api.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. 
If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins" json:"mixins,omitempty"` + // The source syntax of the service. + Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Api) Reset() { *m = Api{} } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { + return fileDescriptor_api_658bf9e68d9b66a3, []int{0} +} +func (m *Api) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Api) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Api.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Api) XXX_Merge(src proto.Message) { + xxx_messageInfo_Api.Merge(dst, src) +} +func (m *Api) XXX_Size() int { + return m.Size() +} +func (m *Api) XXX_DiscardUnknown() { + xxx_messageInfo_Api.DiscardUnknown(m) +} + +var xxx_messageInfo_Api proto.InternalMessageInfo + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return SYNTAX_PROTO2 +} + +func (*Api) XXX_MessageName() string { + return "google.protobuf.Api" +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. 
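The deterministic branch in the XXX_Marshal helpers above is normally reached through proto.Buffer rather than called directly. A sketch, assuming gogo's proto.Buffer mirrors the upstream SetDeterministic API; the type URL below is made up for the example.

    package main

    import (
        "fmt"

        proto "github.com/gogo/protobuf/proto"
        "github.com/gogo/protobuf/types"
    )

    func main() {
        m := &types.Method{
            Name:             "GetAcl",
            RequestTypeUrl:   "type.googleapis.com/google.acl.v1.GetAclRequest",
            RequestStreaming: false,
        }

        // Default path: proto.Marshal reaches the gogo fast marshaler (MarshalTo).
        fast, err := proto.Marshal(m)
        if err != nil {
            panic(err)
        }

        // Deterministic path: with SetDeterministic(true) (assumed API), the
        // XXX_Marshal hook takes the xxx_messageInfo branch instead.
        var buf proto.Buffer
        buf.SetDeterministic(true)
        if err := buf.Marshal(m); err != nil {
            panic(err)
        }
        fmt.Println(len(fast), len(buf.Bytes()))
    }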
+ RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*Option `protobuf:"bytes,6,rep,name=options" json:"options,omitempty"` + // The source syntax of this method. + Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Method) Reset() { *m = Method{} } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { + return fileDescriptor_api_658bf9e68d9b66a3, []int{1} +} +func (m *Method) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Method) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Method.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Method) XXX_Merge(src proto.Message) { + xxx_messageInfo_Method.Merge(dst, src) +} +func (m *Method) XXX_Size() int { + return m.Size() +} +func (m *Method) XXX_DiscardUnknown() { + xxx_messageInfo_Method.DiscardUnknown(m) +} + +var xxx_messageInfo_Method proto.InternalMessageInfo + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return SYNTAX_PROTO2 +} + +func (*Method) XXX_MessageName() string { + return "google.protobuf.Method" +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. 
+// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. 
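Tying the mixin configuration above to the generated Go types, a small illustrative (non-vendored) sketch:

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/types"
    )

    func main() {
        // The "apis:" example from the comment above, as Go values:
        // google.storage.v2.Storage mixes in google.acl.v1.AccessControl,
        // with inherited HTTP paths rooted under "acls".
        api := &types.Api{
            Name: "google.storage.v2.Storage",
            Mixins: []*types.Mixin{
                {Name: "google.acl.v1.AccessControl", Root: "acls"},
            },
        }
        data, err := api.Marshal() // gogo fast marshaler generated in this file
        if err != nil {
            panic(err)
        }
        fmt.Println(len(data), api.GetMixins()[0].GetRoot())
    }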
+ Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { + return fileDescriptor_api_658bf9e68d9b66a3, []int{2} +} +func (m *Mixin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mixin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mixin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Mixin) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mixin.Merge(dst, src) +} +func (m *Mixin) XXX_Size() int { + return m.Size() +} +func (m *Mixin) XXX_DiscardUnknown() { + xxx_messageInfo_Mixin.DiscardUnknown(m) +} + +var xxx_messageInfo_Mixin proto.InternalMessageInfo + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func (*Mixin) XXX_MessageName() string { + return "google.protobuf.Mixin" +} +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} +func (this *Api) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Methods) != len(that1.Methods) { + if len(this.Methods) < len(that1.Methods) { + return -1 + } + return 1 + } + for i := range this.Methods { + if c := this.Methods[i].Compare(that1.Methods[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Version != that1.Version { + if this.Version < that1.Version { + return -1 + } + return 1 + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if len(this.Mixins) != len(that1.Mixins) { + if len(this.Mixins) < len(that1.Mixins) { + return -1 + } + return 1 + } + for i := range this.Mixins { + if c := this.Mixins[i].Compare(that1.Mixins[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Method) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + if this.RequestTypeUrl < 
that1.RequestTypeUrl { + return -1 + } + return 1 + } + if this.RequestStreaming != that1.RequestStreaming { + if !this.RequestStreaming { + return -1 + } + return 1 + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + if this.ResponseTypeUrl < that1.ResponseTypeUrl { + return -1 + } + return 1 + } + if this.ResponseStreaming != that1.ResponseStreaming { + if !this.ResponseStreaming { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Mixin) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Root != that1.Root { + if this.Root < that1.Root { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Api) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Methods) != len(that1.Methods) { + return false + } + for i := range this.Methods { + if !this.Methods[i].Equal(that1.Methods[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Version != that1.Version { + return false + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if len(this.Mixins) != len(that1.Mixins) { + return false + } + for i := range this.Mixins { + if !this.Mixins[i].Equal(that1.Mixins[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Method) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + return false + } + if this.RequestStreaming != that1.RequestStreaming { + return false + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + return false + } + if this.ResponseStreaming != that1.ResponseStreaming { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if 
!bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Mixin) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Root != that1.Root { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Api) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Api{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Methods != nil { + s = append(s, "Methods: "+fmt.Sprintf("%#v", this.Methods)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + if this.Mixins != nil { + s = append(s, "Mixins: "+fmt.Sprintf("%#v", this.Mixins)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Method) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Method{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "RequestTypeUrl: "+fmt.Sprintf("%#v", this.RequestTypeUrl)+",\n") + s = append(s, "RequestStreaming: "+fmt.Sprintf("%#v", this.RequestStreaming)+",\n") + s = append(s, "ResponseTypeUrl: "+fmt.Sprintf("%#v", this.ResponseTypeUrl)+",\n") + s = append(s, "ResponseStreaming: "+fmt.Sprintf("%#v", this.ResponseStreaming)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Mixin) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Mixin{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Root: "+fmt.Sprintf("%#v", this.Root)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringApi(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Api) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Api) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Methods) > 0 { + 
for _, msg := range m.Methods { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Version) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + if m.SourceContext != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintApi(dAtA, i, uint64(m.SourceContext.Size())) + n1, err := m.SourceContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Mixins) > 0 { + for _, msg := range m.Mixins { + dAtA[i] = 0x32 + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Syntax != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Method) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Method) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.RequestTypeUrl) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl))) + i += copy(dAtA[i:], m.RequestTypeUrl) + } + if m.RequestStreaming { + dAtA[i] = 0x18 + i++ + if m.RequestStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.ResponseTypeUrl) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl))) + i += copy(dAtA[i:], m.ResponseTypeUrl) + } + if m.ResponseStreaming { + dAtA[i] = 0x28 + i++ + if m.ResponseStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x32 + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Syntax != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Mixin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mixin) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Root) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Root))) + i += copy(dAtA[i:], m.Root) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedApi(r randyApi, easy bool) *Api { + this := &Api{} + 
this.Name = string(randStringApi(r)) + if r.Intn(10) != 0 { + v1 := r.Intn(5) + this.Methods = make([]*Method, v1) + for i := 0; i < v1; i++ { + this.Methods[i] = NewPopulatedMethod(r, easy) + } + } + if r.Intn(10) != 0 { + v2 := r.Intn(5) + this.Options = make([]*Option, v2) + for i := 0; i < v2; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Version = string(randStringApi(r)) + if r.Intn(10) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + if r.Intn(10) != 0 { + v3 := r.Intn(5) + this.Mixins = make([]*Mixin, v3) + for i := 0; i < v3; i++ { + this.Mixins[i] = NewPopulatedMixin(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMethod(r randyApi, easy bool) *Method { + this := &Method{} + this.Name = string(randStringApi(r)) + this.RequestTypeUrl = string(randStringApi(r)) + this.RequestStreaming = bool(bool(r.Intn(2) == 0)) + this.ResponseTypeUrl = string(randStringApi(r)) + this.ResponseStreaming = bool(bool(r.Intn(2) == 0)) + if r.Intn(10) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMixin(r randyApi, easy bool) *Mixin { + this := &Mixin{} + this.Name = string(randStringApi(r)) + this.Root = string(randStringApi(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 3) + } + return this +} + +type randyApi interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneApi(r randyApi) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringApi(r randyApi) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneApi(r) + } + return string(tmps) +} +func randUnrecognizedApi(r randyApi, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldApi(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldApi(dAtA []byte, r randyApi, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateApi(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateApi(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateApi(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 
+ } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Api) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Methods) > 0 { + for _, e := range m.Methods { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Mixins) > 0 { + for _, e := range m.Mixins { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Method) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RequestTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RequestStreaming { + n += 2 + } + l = len(m.ResponseTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ResponseStreaming { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Mixin) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovApi(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Api) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Api{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Methods:` + strings.Replace(fmt.Sprintf("%v", this.Methods), "Method", "Method", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Mixins:` + strings.Replace(fmt.Sprintf("%v", this.Mixins), "Mixin", "Mixin", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Method) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Method{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`, + `RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`, + `ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`, + `ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Mixin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mixin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Root:` + 
fmt.Sprintf("%v", this.Root) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Api) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Api: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Api: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Methods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Methods = append(m.Methods, &Method{}) + if err := m.Methods[len(m.Methods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mixins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mixins = append(m.Mixins, &Mixin{}) + if err := m.Mixins[len(m.Mixins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= (Syntax(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Method) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Method: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Method: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RequestStreaming = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseStreaming = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= (Syntax(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mixin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mixin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mixin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_api_658bf9e68d9b66a3) } + +var fileDescriptor_api_658bf9e68d9b66a3 = []byte{ + // 472 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xf3, 0xee, 0x92, 0x4b, 0x71, 0x45, 0x0a, 0x46, 0x02, 0x93, 0xc1, 0x3a, 0x55, 0x0c, + 0x27, 0x10, 0x17, 0x51, 0x3e, 0x41, 0x83, 0x50, 0x07, 0x84, 0x88, 0x2e, 0x20, 0x24, 0x96, 0x28, + 0x0d, 0x26, 0x9c, 0x74, 0x67, 0x1f, 0xb6, 0x03, 0xc9, 0x86, 0xc4, 0x37, 0x61, 0x42, 0x8c, 0x7c, + 0x03, 0xb6, 0x8e, 0x1d, 0x19, 0xc9, 0x75, 0x61, 0xec, 0xc8, 0x88, 0xec, 0x3b, 0x37, 0x25, 0x0d, + 0x12, 0xdd, 0xfc, 0xde, 0xff, 0xe7, 0xbf, 0xdf, 0xfb, 0x1b, 0xdd, 0x9e, 0x0a, 0x31, 0xcd, 0x58, + 0xaf, 0x90, 0x42, 0x8b, 0xc3, 0xd9, 0x9b, 0xde, 0xb8, 0x48, 0x63, 0x5b, 0xe0, 0x9d, 0x4a, 0x8a, + 0x9d, 0xd4, 0xbd, 0xb3, 0xce, 0x2a, 0x31, 0x93, 0x13, 0x36, 0x9a, 0x08, 0xae, 0xd9, 0x5c, 0x57, + 0x60, 0xb7, 0xbb, 0x4e, 0xe9, 0x45, 0x51, 0x9b, 0xec, 0x7e, 0xf7, 0x90, 0xbf, 0x5f, 0xa4, 0x18, + 0xa3, 0x26, 0x1f, 0xe7, 0x8c, 0x40, 0x08, 0xd1, 0x95, 0xc4, 0x9e, 0xf1, 0x03, 0xd4, 0xce, 0x99, + 0x7e, 0x2b, 0x5e, 0x2b, 0xe2, 0x85, 0x7e, 0xb4, 0xbd, 0x77, 0x2b, 0x5e, 0x1b, 0x20, 0x7e, 0x6a, + 0xf5, 0xc4, 0x71, 0xe6, 0x8a, 0x28, 0x74, 0x2a, 0xb8, 0x22, 0xfe, 0x3f, 0xae, 0x3c, 0xb3, 0x7a, + 0xe2, 0x38, 0x4c, 0x50, 0xfb, 0x3d, 0x93, 0x2a, 0x15, 0x9c, 0x34, 0xed, 0xe3, 0xae, 0xc4, 0x8f, + 0x51, 0xe7, 0xef, 0x7d, 0x48, 0x2b, 0x84, 0x68, 0x7b, 0x8f, 0x5e, 0xf0, 0x1c, 
0x5a, 0xec, 0x51, + 0x45, 0x25, 0x57, 0xd5, 0xf9, 0x12, 0xc7, 0x28, 0xc8, 0xd3, 0x79, 0xca, 0x15, 0x09, 0xec, 0x48, + 0x37, 0x2f, 0x6e, 0x61, 0xe4, 0xa4, 0xa6, 0x70, 0x0f, 0x05, 0x6a, 0xc1, 0xf5, 0x78, 0x4e, 0xda, + 0x21, 0x44, 0x9d, 0x0d, 0x2b, 0x0c, 0xad, 0x9c, 0xd4, 0xd8, 0xee, 0x37, 0x0f, 0x05, 0x55, 0x10, + 0x1b, 0x63, 0x8c, 0xd0, 0x35, 0xc9, 0xde, 0xcd, 0x98, 0xd2, 0x23, 0x13, 0xfc, 0x68, 0x26, 0x33, + 0xe2, 0x59, 0xbd, 0x53, 0xf7, 0x9f, 0x2f, 0x0a, 0xf6, 0x42, 0x66, 0xf8, 0x1e, 0xba, 0xee, 0x48, + 0xa5, 0x25, 0x1b, 0xe7, 0x29, 0x9f, 0x12, 0x3f, 0x84, 0x68, 0x2b, 0x71, 0x16, 0x43, 0xd7, 0xc7, + 0x77, 0x0d, 0xac, 0x0a, 0xc1, 0x15, 0x5b, 0xf9, 0x56, 0x09, 0xee, 0x38, 0xc1, 0x19, 0xdf, 0x47, + 0xf8, 0x8c, 0x5d, 0x39, 0xb7, 0xac, 0xf3, 0x99, 0xcb, 0xca, 0xfa, 0xdc, 0x2f, 0x06, 0xff, 0xf9, + 0x8b, 0x97, 0x0e, 0xad, 0x87, 0x5a, 0x36, 0xf6, 0x8d, 0x91, 0x61, 0xd4, 0x94, 0x42, 0xe8, 0x3a, + 0x26, 0x7b, 0xee, 0x7f, 0x82, 0xe3, 0x25, 0x6d, 0xfc, 0x58, 0xd2, 0xc6, 0xe9, 0x92, 0xc2, 0xef, + 0x25, 0x85, 0x8f, 0x25, 0x85, 0x2f, 0x25, 0x85, 0xa3, 0x92, 0xc2, 0x71, 0x49, 0xe1, 0x67, 0x49, + 0xe1, 0x57, 0x49, 0x1b, 0xa7, 0xa6, 0x7f, 0x42, 0xe1, 0xe8, 0x84, 0x02, 0xba, 0x31, 0x11, 0xf9, + 0xfa, 0x2c, 0xfd, 0xad, 0xfd, 0x22, 0x1d, 0x98, 0x62, 0x00, 0xaf, 0x5a, 0x26, 0x3c, 0xf5, 0xd9, + 0xf3, 0x0f, 0x06, 0xfd, 0xaf, 0x1e, 0x3d, 0xa8, 0xd0, 0x81, 0x1b, 0xfb, 0x25, 0xcb, 0xb2, 0x27, + 0x5c, 0x7c, 0xe0, 0x26, 0x4b, 0x75, 0x18, 0x58, 0x8f, 0x87, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x20, 0x4e, 0x03, 0x8e, 0xa6, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go index ee9deac..39bb7a2 100644 --- a/vendor/github.com/gogo/protobuf/types/duration.pb.go +++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -1,21 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: duration.proto +// source: google/protobuf/duration.proto -/* - Package types is a generated protocol buffer package. - - It is generated from these files: - duration.proto - - It has these top-level messages: - Duration -*/ package types import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import bytes "bytes" + import strings "strings" import reflect "reflect" @@ -103,13 +96,44 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Duration) Reset() { *m = Duration{} } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorDuration, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_7f04bf66a647e6f6, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo func (m *Duration) GetSeconds() int64 { if m != nil { @@ -125,6 +149,9 @@ func (m *Duration) GetNanos() int32 { return 0 } +func (*Duration) XXX_MessageName() string { + return "google.protobuf.Duration" +} func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } @@ -165,6 +192,9 @@ func (this *Duration) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *Duration) Equal(that interface{}) bool { @@ -192,6 +222,9 @@ func (this *Duration) Equal(that interface{}) bool { if this.Nanos != that1.Nanos { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Duration) GoString() string { @@ -202,6 +235,9 @@ func (this *Duration) GoString() string { s = append(s, "&types.Duration{") s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -238,6 +274,9 @@ func (m *Duration) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -259,6 +298,9 @@ func (m *Duration) Size() (n int) { if m.Nanos != 0 { n += 1 + sovDuration(uint64(m.Nanos)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -354,6 +396,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -468,21 +511,24 @@ var ( ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("duration.proto", fileDescriptorDuration) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_7f04bf66a647e6f6) +} -var fileDescriptorDuration = []byte{ - // 203 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, - 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, - 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, - 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, - 0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, - 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe5, 0x18, - 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x76, 0xe2, 0x85, 0x59, 0x1c, 0x00, 0x12, 0x09, - 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, 0xb8, 0x88, 0x89, 0xd9, 0x3d, - 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x4b, 0x00, 0x54, 0x8b, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, - 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x2c, 0x63, 0x40, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x9d, 0x5a, 0x25, 0xa5, 0xe6, 0x00, 0x00, 0x00, +var fileDescriptor_duration_7f04bf66a647e6f6 = []byte{ + // 215 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0xd4, 0xcc, 0x78, 0xe1, 0xa1, + 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, + 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, + 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, + 0xe7, 0xea, 0xa1, 0xd9, 0xef, 0xc4, 0x0b, 0xb3, 0x3d, 0x00, 0x24, 0x12, 0xc0, 0x18, 0xc5, 0x5a, + 0x52, 0x59, 0x90, 0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, + 0x39, 0x77, 0x88, 0x96, 0x00, 0xa8, 0x16, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, + 0xbc, 0x10, 0x90, 0xca, 0x24, 0x36, 0xb0, 0x59, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, + 0xb1, 0xa3, 0x66, 0xfb, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go index e7018b9..db58ac6 100644 --- a/vendor/github.com/gogo/protobuf/types/empty.pb.go +++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -1,21 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: empty.proto +// source: google/protobuf/empty.proto -/* -Package types is a generated protocol buffer package. - -It is generated from these files: - empty.proto - -It has these top-level messages: - Empty -*/ package types import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import bytes "bytes" + import strings "strings" import reflect "reflect" @@ -42,13 +35,47 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // // The JSON representation for `Empty` is empty JSON object `{}`. type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Empty) Reset() { *m = Empty{} } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptorEmpty, []int{0} } -func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_empty_fa64318be3e23895, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(dst, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo +func (*Empty) XXX_MessageName() string { + return "google.protobuf.Empty" +} func init() { proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") } @@ -77,6 +104,9 @@ func (this *Empty) Compare(that interface{}) int { } else if this == nil { return -1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *Empty) Equal(that interface{}) bool { @@ -98,6 +128,9 @@ func (this *Empty) Equal(that interface{}) bool { } else if this == nil { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Empty) GoString() string { @@ -106,6 +139,9 @@ func (this *Empty) GoString() string { } s := make([]string, 0, 4) s = append(s, "&types.Empty{") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -132,6 +168,9 @@ func (m *Empty) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -147,6 +186,7 @@ func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { this := &Empty{} if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedEmpty(r, 1) } return this } @@ -226,6 +266,9 @@ func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { func (m *Empty) Size() (n int) { var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -247,6 +290,7 @@ func (this *Empty) String() string { return "nil" } s := strings.Join([]string{`&Empty{`, + 
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -300,6 +344,7 @@ func (m *Empty) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -414,19 +459,20 @@ var ( ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("empty.proto", fileDescriptorEmpty) } +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_fa64318be3e23895) } -var fileDescriptorEmpty = []byte{ - // 169 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2d, 0x28, - 0xa9, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, - 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xd8, 0xb9, 0x58, 0x5d, 0x41, 0xf2, 0x4e, 0x2d, 0x8c, 0x17, 0x1e, - 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0xe3, 0x8f, 0x87, 0x72, 0x8c, 0x0d, - 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, - 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0x00, 0x12, 0x7f, 0x2c, 0xc7, 0xc8, 0x25, - 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0xa0, 0x13, 0x17, 0xd8, 0xb8, 0x00, 0x10, 0x37, 0x80, 0x31, - 0x8a, 0xb5, 0xa4, 0xb2, 0x20, 0xb5, 0xf8, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, - 0x55, 0x4c, 0x72, 0xee, 0x10, 0xf5, 0x01, 0x50, 0xf5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, - 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x95, 0x49, 0x6c, 0x60, 0x83, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, - 0xff, 0x7c, 0xa8, 0xf0, 0xc4, 0xb6, 0x00, 0x00, 0x00, +var fileDescriptor_empty_fa64318be3e23895 = []byte{ + // 180 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x07, 0xe3, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, + 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, + 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, + 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7, 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, + 0x66, 0xa8, 0x13, 0x17, 0xd8, 0xc8, 0x00, 0x10, 0x37, 0x80, 0x31, 0x8a, 0xb5, 0xa4, 0xb2, 0x20, + 0xb5, 0xf8, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0xf5, 0x01, 0x50, 0xf5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, + 0x95, 0x49, 0x6c, 0x60, 0x83, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x07, 0x8c, 0xf8, 0x26, + 0xca, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go index 22e8b4f..13d6176 100644 --- a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go +++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -1,21 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: field_mask.proto +// source: google/protobuf/field_mask.proto -/* -Package types is a generated protocol buffer package. 
- -It is generated from these files: - field_mask.proto - -It has these top-level messages: - FieldMask -*/ package types import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import bytes "bytes" + import strings "strings" import reflect "reflect" @@ -233,14 +226,51 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // // Note that oneof type names ("test_oneof" in this case) cannot be used in // paths. +// +// ## Field Mask Verification +// +// The implementation of the all the API methods, which have any FieldMask type +// field in the request, should verify the included field paths, and return +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. type FieldMask struct { // The set of field mask paths. - Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` + Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldMask) Reset() { *m = FieldMask{} } -func (*FieldMask) ProtoMessage() {} -func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptorFieldMask, []int{0} } +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_field_mask_3abe20b2f0d4cb1c, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(dst, src) +} +func (m *FieldMask) XXX_Size() int { + return m.Size() +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo func (m *FieldMask) GetPaths() []string { if m != nil { @@ -249,6 +279,9 @@ func (m *FieldMask) GetPaths() []string { return nil } +func (*FieldMask) XXX_MessageName() string { + return "google.protobuf.FieldMask" +} func init() { proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") } @@ -291,6 +324,9 @@ func (this *FieldMask) Compare(that interface{}) int { return 1 } } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *FieldMask) Equal(that interface{}) bool { @@ -320,6 +356,9 @@ func (this *FieldMask) Equal(that interface{}) bool { return false } } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *FieldMask) GoString() string { @@ -329,6 +368,9 @@ func (this *FieldMask) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.FieldMask{") s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -370,6 +412,9 @@ func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -390,6 +435,7 @@ func NewPopulatedFieldMask(r randyFieldMask, easy 
bool) *FieldMask { this.Paths[i] = string(randStringFieldMask(r)) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2) } return this } @@ -475,6 +521,9 @@ func (m *FieldMask) Size() (n int) { n += 1 + l + sovFieldMask(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -497,6 +546,7 @@ func (this *FieldMask) String() string { } s := strings.Join([]string{`&FieldMask{`, `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -579,6 +629,7 @@ func (m *FieldMask) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -693,21 +744,23 @@ var ( ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("field_mask.proto", fileDescriptorFieldMask) } +func init() { + proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_field_mask_3abe20b2f0d4cb1c) +} -var fileDescriptorFieldMask = []byte{ - // 193 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0xcb, 0x4c, 0xcd, - 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, - 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0x14, 0xb9, 0x38, 0xdd, 0x40, 0x8a, - 0x7c, 0x13, 0x8b, 0xb3, 0x85, 0x44, 0xb8, 0x58, 0x0b, 0x12, 0x4b, 0x32, 0x8a, 0x25, 0x18, 0x15, - 0x98, 0x35, 0x38, 0x83, 0x20, 0x1c, 0xa7, 0x56, 0xc6, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, - 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, - 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, - 0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0xe4, 0x12, 0x4e, 0xce, 0xcf, 0xd5, 0x43, - 0xb3, 0xca, 0x89, 0x0f, 0x6e, 0x51, 0x00, 0x48, 0x28, 0x80, 0x31, 0x8a, 0xb5, 0xa4, 0xb2, 0x20, - 0xb5, 0x78, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0x86, 0x00, 0xa8, - 0x06, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, 0xb2, 0x24, 0x36, - 0xb0, 0x49, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x31, 0x89, 0xb5, 0xd6, 0x00, 0x00, - 0x00, +var fileDescriptor_field_mask_3abe20b2f0d4cb1c = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x9d, 0x8c, + 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0xe3, 0x8f, 0x87, 0x72, + 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, + 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0x00, 0x12, 0x7f, 0x2c, 0xc7, + 0x78, 0xe2, 0xb1, 0x1c, 0x23, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x75, 0x4e, 0x7c, 0x70, + 0xcb, 0x02, 0x40, 0x42, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, + 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x34, 0x04, 0x40, 0x35, 0xe8, 
0x85, 0xa7, 0xe6, + 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x32, 0x06, 0x04, + 0x00, 0x00, 0xff, 0xff, 0xea, 0xa6, 0x08, 0xbf, 0xea, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/vendor/github.com/gogo/protobuf/types/source_context.pb.go new file mode 100644 index 0000000..0c1daf7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/source_context.pb.go @@ -0,0 +1,535 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. + FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { + return fileDescriptor_source_context_d25fd312302631f7, []int{0} +} +func (m *SourceContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *SourceContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceContext.Merge(dst, src) +} +func (m *SourceContext) XXX_Size() int { + return m.Size() +} +func (m *SourceContext) XXX_DiscardUnknown() { + xxx_messageInfo_SourceContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceContext proto.InternalMessageInfo + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func (*SourceContext) XXX_MessageName() string { + return "google.protobuf.SourceContext" +} +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} +func (this *SourceContext) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.FileName != that1.FileName { + if this.FileName < that1.FileName { + return -1 + } 
+ return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *SourceContext) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FileName != that1.FileName { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *SourceContext) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.SourceContext{") + s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSourceContext(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SourceContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.FileName) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) + i += copy(dAtA[i:], m.FileName) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { + this := &SourceContext{} + this.FileName = string(randStringSourceContext(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2) + } + return this +} + +type randySourceContext interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneSourceContext(r randySourceContext) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringSourceContext(r randySourceContext) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneSourceContext(r) + } + return string(tmps) +} +func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) + case 1: + 
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *SourceContext) Size() (n int) { + var l int + _ = l + l = len(m.FileName) + if l > 0 { + n += 1 + l + sovSourceContext(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSourceContext(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSourceContext(x uint64) (n int) { + return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SourceContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceContext{`, + `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringSourceContext(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SourceContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSourceContext + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSourceContext(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSourceContext(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSourceContext + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSourceContext(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_source_context_d25fd312302631f7) +} + +var fileDescriptor_source_context_d25fd312302631f7 = []byte{ + // 216 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x7a, 0x19, 0x2f, 0x3c, 0x94, 0x63, + 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, + 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, + 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, + 0x46, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0x34, 0x9b, 0x9d, 0x84, 0x50, 0xec, 0x0d, 0x00, 0x09, + 0x07, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0x2f, 0x62, 0x62, 0x76, 0x0f, 0x70, 0x5a, + 0xc5, 0x24, 0xe7, 0x0e, 0xd1, 0x14, 0x00, 0xd5, 0xa4, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, + 0x5f, 0x9e, 0x17, 0x02, 0x52, 0x96, 0xc4, 0x06, 
0x36, 0xcd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, + 0x86, 0x8b, 0x02, 0xb9, 0xfd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go index 7d5372b..659f2e5 100644 --- a/vendor/github.com/gogo/protobuf/types/struct.pb.go +++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -1,17 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: struct.proto +// source: google/protobuf/struct.proto -/* - Package types is a generated protocol buffer package. - - It is generated from these files: - struct.proto - - It has these top-level messages: - Struct - Value - ListValue -*/ package types import proto "github.com/gogo/protobuf/proto" @@ -20,11 +9,13 @@ import math "math" import strconv "strconv" +import bytes "bytes" + import strings "strings" import reflect "reflect" -import sortkeys "github.com/gogo/protobuf/sortkeys" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" -import binary "encoding/binary" +import encoding_binary "encoding/binary" import io "io" @@ -57,8 +48,10 @@ var NullValue_value = map[string]int32{ "NULL_VALUE": 0, } -func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptorStruct, []int{0} } -func (NullValue) XXX_WellKnownType() string { return "NullValue" } +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_struct_e8dc68d36b73896c, []int{0} +} +func (NullValue) XXX_WellKnownType() string { return "NullValue" } // `Struct` represents a structured data value, consisting of fields // which map to dynamically typed values. In some languages, `Struct` @@ -70,13 +63,44 @@ func (NullValue) XXX_WellKnownType() string { return "NullValue" } // The JSON representation for `Struct` is JSON object. type Struct struct { // Unordered map of dynamically typed values. 
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_e8dc68d36b73896c, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(dst, src) +} +func (m *Struct) XXX_Size() int { + return m.Size() +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) } -func (m *Struct) Reset() { *m = Struct{} } -func (*Struct) ProtoMessage() {} -func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{0} } -func (*Struct) XXX_WellKnownType() string { return "Struct" } +var xxx_messageInfo_Struct proto.InternalMessageInfo func (m *Struct) GetFields() map[string]*Value { if m != nil { @@ -85,6 +109,10 @@ func (m *Struct) GetFields() map[string]*Value { return nil } +func (*Struct) XXX_MessageName() string { + return "google.protobuf.Struct" +} + // `Value` represents a dynamically typed value which can be either // null, a number, a string, a boolean, a recursive struct value, or a // list of values. 
A producer of value is expected to set one of that @@ -101,13 +129,44 @@ type Value struct { // *Value_BoolValue // *Value_StructValue // *Value_ListValue - Kind isValue_Kind `protobuf_oneof:"kind"` + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Value) Reset() { *m = Value{} } -func (*Value) ProtoMessage() {} -func (*Value) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{1} } -func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_e8dc68d36b73896c, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(dst, src) +} +func (m *Value) XXX_Size() int { + return m.Size() +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo type isValue_Kind interface { isValue_Kind() @@ -297,26 +356,26 @@ func _Value_OneofSizer(msg proto.Message) (n int) { // kind switch x := m.Kind.(type) { case *Value_NullValue: - n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += 1 // tag and wire n += proto.SizeVarint(uint64(x.NullValue)) case *Value_NumberValue: - n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 1 // tag and wire n += 8 case *Value_StringValue: - n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(len(x.StringValue))) n += len(x.StringValue) case *Value_BoolValue: - n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 // tag and wire n += 1 case *Value_StructValue: s := proto.Size(x.StructValue) - n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case *Value_ListValue: s := proto.Size(x.ListValue) - n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s case nil: @@ -326,18 +385,53 @@ func _Value_OneofSizer(msg proto.Message) (n int) { return n } +func (*Value) XXX_MessageName() string { + return "google.protobuf.Value" +} + // `ListValue` is a wrapper around a repeated field of values. // // The JSON representation for `ListValue` is JSON array. type ListValue struct { // Repeated field of dynamically typed values. 
- Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ListValue) Reset() { *m = ListValue{} } -func (*ListValue) ProtoMessage() {} -func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{2} } -func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_struct_e8dc68d36b73896c, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(dst, src) +} +func (m *ListValue) XXX_Size() int { + return m.Size() +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo func (m *ListValue) GetValues() []*Value { if m != nil { @@ -346,8 +440,12 @@ func (m *ListValue) GetValues() []*Value { return nil } +func (*ListValue) XXX_MessageName() string { + return "google.protobuf.ListValue" +} func init() { proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") proto.RegisterType((*Value)(nil), "google.protobuf.Value") proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) @@ -386,6 +484,9 @@ func (this *Struct) Equal(that interface{}) bool { return false } } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Value) Equal(that interface{}) bool { @@ -416,6 +517,9 @@ func (this *Value) Equal(that interface{}) bool { } else if !this.Kind.Equal(that1.Kind) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Value_NullValue) Equal(that interface{}) bool { @@ -589,6 +693,9 @@ func (this *ListValue) Equal(that interface{}) bool { return false } } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Struct) GoString() string { @@ -601,7 +708,7 @@ func (this *Struct) GoString() string { for k := range this.Fields { keysForFields = append(keysForFields, k) } - sortkeys.Strings(keysForFields) + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) mapStringForFields := "map[string]*Value{" for _, k := range keysForFields { mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) @@ -610,6 +717,9 @@ func (this *Struct) GoString() string { if this.Fields != nil { s = append(s, "Fields: "+mapStringForFields+",\n") } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -622,6 +732,9 @@ func (this *Value) GoString() string { if 
this.Kind != nil { s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -682,6 +795,9 @@ func (this *ListValue) GoString() string { if this.Values != nil { s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -736,6 +852,9 @@ func (m *Struct) MarshalTo(dAtA []byte) (int, error) { } } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -761,6 +880,9 @@ func (m *Value) MarshalTo(dAtA []byte) (int, error) { } i += nn2 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -775,7 +897,7 @@ func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { i := 0 dAtA[i] = 0x11 i++ - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) i += 8 return i, nil } @@ -854,6 +976,9 @@ func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -876,6 +1001,7 @@ func NewPopulatedStruct(r randyStruct, easy bool) *Struct { } } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) } return this } @@ -898,6 +1024,7 @@ func NewPopulatedValue(r randyStruct, easy bool) *Value { this.Kind = NewPopulatedValue_ListValue(r, easy) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 7) } return this } @@ -945,6 +1072,7 @@ func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { } } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) } return this } @@ -1037,6 +1165,9 @@ func (m *Struct) Size() (n int) { n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1046,6 +1177,9 @@ func (m *Value) Size() (n int) { if m.Kind != nil { n += m.Kind.Size() } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1101,6 +1235,9 @@ func (m *ListValue) Size() (n int) { n += 1 + l + sovStruct(uint64(l)) } } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1125,7 +1262,7 @@ func (this *Struct) String() string { for k := range this.Fields { keysForFields = append(keysForFields, k) } - sortkeys.Strings(keysForFields) + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) mapStringForFields := "map[string]*Value{" for _, k := range keysForFields { mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) @@ -1133,6 +1270,7 @@ func (this *Struct) String() string { mapStringForFields += "}" s := strings.Join([]string{`&Struct{`, `Fields:` + mapStringForFields + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1143,6 +1281,7 @@ func (this *Value) String() string { } s := strings.Join([]string{`&Value{`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1213,6 +1352,7 @@ func (this *ListValue) String() string { } s := 
strings.Join([]string{`&ListValue{`, `Values:` + strings.Replace(fmt.Sprintf("%v", this.Values), "Value", "Value", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1389,6 +1529,7 @@ func (m *Struct) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1455,7 +1596,7 @@ func (m *Value) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} case 3: @@ -1584,6 +1725,7 @@ func (m *Value) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1665,6 +1807,7 @@ func (m *ListValue) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1779,35 +1922,38 @@ var ( ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("struct.proto", fileDescriptorStruct) } +func init() { + proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_e8dc68d36b73896c) +} -var fileDescriptorStruct = []byte{ - // 432 bytes of a gzipped FileDescriptorProto +var fileDescriptor_struct_e8dc68d36b73896c = []byte{ + // 443 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, - 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0x55, 0xa9, 0x0e, 0x09, 0xa2, 0x22, 0x1d, 0x51, 0xba, - 0x58, 0x08, 0x79, 0x08, 0x0b, 0x22, 0x2c, 0x58, 0x2a, 0xad, 0x84, 0x55, 0x19, 0x43, 0x8b, 0xc4, - 0x12, 0xe1, 0xd4, 0x8d, 0xac, 0x5e, 0xef, 0x2a, 0xfb, 0x0c, 0xca, 0x06, 0xff, 0x05, 0x33, 0x13, - 0x62, 0xe4, 0xaf, 0x60, 0xec, 0xc8, 0x88, 0x3d, 0x31, 0x76, 0xec, 0x88, 0xee, 0xce, 0x36, 0xa8, - 0x51, 0x36, 0xbf, 0xcf, 0xbf, 0xf7, 0xbd, 0xf7, 0xbd, 0xc3, 0xcd, 0x42, 0xe6, 0xe5, 0x5c, 0xfa, - 0x17, 0xb9, 0x90, 0x82, 0xdc, 0x5e, 0x08, 0xb1, 0x60, 0xa9, 0xa9, 0x92, 0xf2, 0x74, 0xfc, 0x05, - 0xd0, 0x79, 0xad, 0x09, 0x32, 0x45, 0xe7, 0x34, 0x4b, 0xd9, 0x49, 0x31, 0x84, 0x51, 0xcf, 0x73, - 0x27, 0xbb, 0xfe, 0x0d, 0xd8, 0x37, 0xa0, 0xff, 0x42, 0x53, 0x7b, 0x5c, 0xe6, 0xcb, 0xb8, 0x69, - 0xd9, 0x79, 0x85, 0xee, 0x7f, 0x32, 0xd9, 0xc6, 0xde, 0x59, 0xba, 0x1c, 0xc2, 0x08, 0xbc, 0x41, - 0xac, 0x3e, 0xc9, 0x23, 0xec, 0x7f, 0x78, 0xcf, 0xca, 0x74, 0x68, 0x8f, 0xc0, 0x73, 0x27, 0x77, - 0x57, 0xcc, 0x8f, 0xd5, 0xdf, 0xd8, 0x40, 0x4f, 0xed, 0x27, 0x30, 0xfe, 0x61, 0x63, 0x5f, 0x8b, - 0x64, 0x8a, 0xc8, 0x4b, 0xc6, 0x66, 0xc6, 0x40, 0x99, 0x6e, 0x4d, 0x76, 0x56, 0x0c, 0x0e, 0x4b, - 0xc6, 0x34, 0x7f, 0x60, 0xc5, 0x03, 0xde, 0x16, 0x64, 0x17, 0x37, 0x79, 0x79, 0x9e, 0xa4, 0xf9, - 0xec, 0xdf, 0x7c, 0x38, 0xb0, 0x62, 0xd7, 0xa8, 0x1d, 0x54, 0xc8, 0x3c, 0xe3, 0x8b, 0x06, 0xea, - 0xa9, 0xc5, 0x15, 0x64, 0x54, 0x03, 0x3d, 0x40, 0x4c, 0x84, 0x68, 0xd7, 0xd8, 0x18, 0x81, 0x77, - 0x4b, 0x8d, 0x52, 0x9a, 0x01, 0x9e, 0xb5, 0xd7, 0x6e, 0x90, 0xbe, 0x8e, 0x7a, 0x6f, 0xcd, 0x1d, - 0x1b, 0xfb, 0x72, 0x2e, 0xbb, 0x94, 0x2c, 0x2b, 0xda, 0x5e, 0x47, 0xf7, 0xae, 0xa6, 0x0c, 0xb3, - 0x42, 0x76, 0x29, 0x59, 0x5b, 0x04, 0x0e, 0x6e, 0x9c, 0x65, 0xfc, 0x64, 0x3c, 0xc5, 0x41, 0x47, - 0x10, 0x1f, 0x1d, 
0x6d, 0xd6, 0xbe, 0xe8, 0xba, 0xa3, 0x37, 0xd4, 0xc3, 0xfb, 0x38, 0xe8, 0x8e, - 0x48, 0xb6, 0x10, 0x0f, 0x8f, 0xc2, 0x70, 0x76, 0xfc, 0x3c, 0x3c, 0xda, 0xdb, 0xb6, 0x82, 0xcf, - 0x70, 0x59, 0x51, 0xeb, 0x57, 0x45, 0xad, 0xab, 0x8a, 0xc2, 0x75, 0x45, 0xe1, 0x53, 0x4d, 0xe1, - 0x5b, 0x4d, 0xe1, 0x67, 0x4d, 0xe1, 0xb2, 0xa6, 0xf0, 0xbb, 0xa6, 0xf0, 0xa7, 0xa6, 0xd6, 0x55, - 0x4d, 0x01, 0xef, 0xcc, 0xc5, 0xf9, 0xcd, 0x71, 0x81, 0x6b, 0x92, 0x47, 0xaa, 0x8e, 0xe0, 0x5d, - 0x5f, 0x2e, 0x2f, 0xd2, 0xe2, 0x1a, 0xe0, 0xab, 0xdd, 0xdb, 0x8f, 0x82, 0xef, 0x36, 0xdd, 0x37, - 0x0d, 0x51, 0xbb, 0xdf, 0xdb, 0x94, 0xb1, 0x97, 0x5c, 0x7c, 0xe4, 0x6f, 0x14, 0x99, 0x38, 0xda, - 0xe9, 0xf1, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x75, 0xc5, 0x1c, 0x3b, 0xd5, 0x02, 0x00, 0x00, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0xa8, 0x54, 0x87, 0x04, 0x51, 0x41, 0x47, 0x94, 0x2e, + 0x11, 0x42, 0xae, 0x14, 0x16, 0x44, 0x58, 0x88, 0x54, 0x5a, 0x89, 0xa8, 0x0a, 0x86, 0x16, 0x89, + 0x25, 0xc2, 0xae, 0x1b, 0x59, 0xbd, 0xde, 0x55, 0xf6, 0x1d, 0x28, 0x1b, 0x0b, 0xff, 0x03, 0x33, + 0x13, 0x62, 0xe4, 0xaf, 0x60, 0xec, 0xc8, 0x88, 0xcd, 0xc2, 0xd8, 0xb1, 0x23, 0xba, 0x3b, 0xdb, + 0x54, 0x8d, 0xb2, 0xf9, 0x7d, 0xf7, 0x7b, 0xdf, 0x7b, 0xdf, 0x33, 0xde, 0x9f, 0x0b, 0x31, 0x67, + 0xc9, 0xf6, 0x59, 0x26, 0xa4, 0x88, 0xd4, 0xf1, 0x76, 0x2e, 0x33, 0x15, 0xcb, 0xc0, 0xd4, 0xe4, + 0x96, 0x7d, 0x0d, 0xea, 0xd7, 0xfe, 0x17, 0x40, 0xef, 0xb5, 0x21, 0xc8, 0x08, 0xbd, 0xe3, 0x34, + 0x61, 0x47, 0x79, 0x17, 0x7a, 0xad, 0x81, 0x3f, 0xdc, 0x0a, 0xae, 0xc1, 0x81, 0x05, 0x83, 0x17, + 0x86, 0xda, 0xe1, 0x32, 0x5b, 0x84, 0x55, 0xcb, 0xe6, 0x2b, 0xf4, 0xaf, 0xc8, 0x64, 0x03, 0x5b, + 0x27, 0xc9, 0xa2, 0x0b, 0x3d, 0x18, 0x74, 0x42, 0xfd, 0x49, 0x1e, 0x61, 0xfb, 0xc3, 0x7b, 0xa6, + 0x92, 0xae, 0xdb, 0x83, 0x81, 0x3f, 0xbc, 0xb3, 0x64, 0x7e, 0xa8, 0x5f, 0x43, 0x0b, 0x3d, 0x75, + 0x9f, 0x40, 0xff, 0x87, 0x8b, 0x6d, 0x23, 0x92, 0x11, 0x22, 0x57, 0x8c, 0xcd, 0xac, 0x81, 0x36, + 0x5d, 0x1f, 0x6e, 0x2e, 0x19, 0xec, 0x2b, 0xc6, 0x0c, 0xbf, 0xe7, 0x84, 0x1d, 0x5e, 0x17, 0x64, + 0x0b, 0x6f, 0x72, 0x75, 0x1a, 0x25, 0xd9, 0xec, 0xff, 0x7c, 0xd8, 0x73, 0x42, 0xdf, 0xaa, 0x0d, + 0x94, 0xcb, 0x2c, 0xe5, 0xf3, 0x0a, 0x6a, 0xe9, 0xc5, 0x35, 0x64, 0x55, 0x0b, 0x3d, 0x40, 0x8c, + 0x84, 0xa8, 0xd7, 0x58, 0xeb, 0xc1, 0xe0, 0x86, 0x1e, 0xa5, 0x35, 0x0b, 0x3c, 0x33, 0x2e, 0x2a, + 0x96, 0x15, 0xd2, 0x36, 0x51, 0xef, 0xae, 0xb8, 0x63, 0x65, 0xaf, 0x62, 0xd9, 0xa4, 0x64, 0x69, + 0x5e, 0xf7, 0x7a, 0xa6, 0x77, 0x39, 0xe5, 0x24, 0xcd, 0x65, 0x93, 0x92, 0xd5, 0xc5, 0xd8, 0xc3, + 0xb5, 0x93, 0x94, 0x1f, 0xf5, 0x47, 0xd8, 0x69, 0x08, 0x12, 0xa0, 0x67, 0xcc, 0xea, 0x3f, 0xba, + 0xea, 0xe8, 0x15, 0xf5, 0xf0, 0x1e, 0x76, 0x9a, 0x23, 0x92, 0x75, 0xc4, 0xfd, 0x83, 0xc9, 0x64, + 0x76, 0xf8, 0x7c, 0x72, 0xb0, 0xb3, 0xe1, 0x8c, 0x3f, 0xc3, 0x79, 0x41, 0x9d, 0x5f, 0x05, 0x75, + 0x2e, 0x0a, 0x0a, 0x97, 0x05, 0x85, 0x4f, 0x25, 0x85, 0x6f, 0x25, 0x85, 0x9f, 0x25, 0x85, 0xf3, + 0x92, 0xc2, 0xef, 0x92, 0xc2, 0xdf, 0x92, 0x3a, 0x17, 0x5a, 0xfb, 0x43, 0x01, 0x6f, 0xc7, 0xe2, + 0xf4, 0xfa, 0xc8, 0xb1, 0x6f, 0xd3, 0x4f, 0x75, 0x3d, 0x85, 0x77, 0x6d, 0xb9, 0x38, 0x4b, 0xf2, + 0x4b, 0x80, 0xaf, 0x6e, 0x6b, 0x77, 0x3a, 0xfe, 0xee, 0xd2, 0x5d, 0xdb, 0x30, 0xad, 0x77, 0x7c, + 0x9b, 0x30, 0xf6, 0x92, 0x8b, 0x8f, 0xfc, 0x8d, 0x26, 0x23, 0xcf, 0x38, 0x3d, 0xfe, 0x17, 0x00, + 0x00, 0xff, 0xff, 0x9f, 0x67, 0xad, 0xcf, 0xe9, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go index 
41b18f9..e047667 100644 --- a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go +++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -1,21 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: timestamp.proto +// source: google/protobuf/timestamp.proto -/* - Package types is a generated protocol buffer package. - - It is generated from these files: - timestamp.proto - - It has these top-level messages: - Timestamp -*/ package types import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import bytes "bytes" + import strings "strings" import reflect "reflect" @@ -95,7 +88,9 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional // seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), // are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required, though only UTC (as indicated by "Z") is presently supported. +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). // // For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past // 01:30 UTC on January 15, 2017. @@ -106,8 +101,8 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) -// to obtain a formatter capable of generating timestamps in this format. +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. // // type Timestamp struct { @@ -119,13 +114,44 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorTimestamp, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_0a0a9bc758317e91, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -141,6 +167,9 @@ func (m *Timestamp) GetNanos() int32 { return 0 } +func (*Timestamp) XXX_MessageName() string { + return "google.protobuf.Timestamp" +} func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } @@ -181,6 +210,9 @@ func (this *Timestamp) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *Timestamp) Equal(that interface{}) bool { @@ -208,6 +240,9 @@ func (this *Timestamp) Equal(that interface{}) bool { if this.Nanos != that1.Nanos { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Timestamp) GoString() string { @@ -218,6 +253,9 @@ func (this *Timestamp) GoString() string { s = append(s, "&types.Timestamp{") s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -254,6 +292,9 @@ func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -275,6 +316,9 @@ func (m *Timestamp) Size() (n int) { if m.Nanos != 0 { n += 1 + sovTimestamp(uint64(m.Nanos)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -370,6 +414,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -484,21 +529,24 @@ var ( ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("timestamp.proto", fileDescriptorTimestamp) } +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_0a0a9bc758317e91) +} -var fileDescriptorTimestamp = []byte{ - // 205 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, - 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, - 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, - 0x83, 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, - 0x0d, 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x81, 0xf1, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, - 0x3e, 0x3c, 0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, - 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, - 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0x2c, 0x77, 0xe2, 0x83, 0x5b, 0x1d, 0x00, - 0x12, 0x0a, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, 0xb8, 0x88, 0x89, - 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x4f, 0x00, 0x54, 0x8f, 0x5e, 0x78, 0x6a, - 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x30, 0x63, 0x40, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0xa2, 0x42, 0xda, 0xea, 0x00, 0x00, 0x00, +var fileDescriptor_timestamp_0a0a9bc758317e91 = []byte{ + // 216 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x0b, 0xe3, 0x85, + 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, + 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, + 0xc3, 0x87, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7, 0xc8, 0x25, 0x9c, + 0x9c, 0x9f, 0xab, 0x87, 0xe6, 0x00, 0x27, 0x3e, 0xb8, 0xf5, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, + 0xd6, 0x92, 0xca, 0x82, 0xd4, 0xe2, 0x1f, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, + 0x31, 0xc9, 0xb9, 0x43, 0xf4, 0x04, 0x40, 0xf5, 0xe8, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, + 0x97, 0xe7, 0x85, 0x80, 0x54, 0x26, 0xb1, 0x81, 0x0d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, + 0x40, 0xae, 0xf1, 0x42, 0xfe, 0x00, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/types/type.pb.go b/vendor/github.com/gogo/protobuf/types/type.pb.go new file mode 100644 index 0000000..5f406e0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/type.pb.go @@ -0,0 +1,3228 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: google/protobuf/type.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strconv "strconv" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (Syntax) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_type_345e3aff58b7b252, []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + TYPE_DOUBLE Field_Kind = 1 + // Field type float. + TYPE_FLOAT Field_Kind = 2 + // Field type int64. + TYPE_INT64 Field_Kind = 3 + // Field type uint64. + TYPE_UINT64 Field_Kind = 4 + // Field type int32. + TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + TYPE_BOOL Field_Kind = 8 + // Field type string. + TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + TYPE_GROUP Field_Kind = 10 + // Field type message. + TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + TYPE_BYTES Field_Kind = 12 + // Field type uint32. + TYPE_UINT32 Field_Kind = 13 + // Field type enum. + TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_type_345e3aff58b7b252, []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. 
+ CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. + CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_type_345e3aff58b7b252, []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Type) Reset() { *m = Type{} } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { + return fileDescriptor_type_345e3aff58b7b252, []int{0} +} +func (m *Type) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Type.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Type) XXX_Merge(src proto.Message) { + xxx_messageInfo_Type.Merge(dst, src) +} +func (m *Type) XXX_Size() int { + return m.Size() +} +func (m *Type) XXX_DiscardUnknown() { + xxx_messageInfo_Type.DiscardUnknown(m) +} + +var xxx_messageInfo_Type proto.InternalMessageInfo + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return SYNTAX_PROTO2 +} + +func (*Type) XXX_MessageName() string { + return "google.protobuf.Type" +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. 
+ Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_type_345e3aff58b7b252, []int{1} +} +func (m *Field) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(dst, src) +} +func (m *Field) XXX_Size() int { + return m.Size() +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +func (*Field) XXX_MessageName() string { + return "google.protobuf.Field" +} + +// Enum type definition. +type Enum struct { + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. 
+ Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { + return fileDescriptor_type_345e3aff58b7b252, []int{2} +} +func (m *Enum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Enum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Enum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Enum) XXX_Merge(src proto.Message) { + xxx_messageInfo_Enum.Merge(dst, src) +} +func (m *Enum) XXX_Size() int { + return m.Size() +} +func (m *Enum) XXX_DiscardUnknown() { + xxx_messageInfo_Enum.DiscardUnknown(m) +} + +var xxx_messageInfo_Enum proto.InternalMessageInfo + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return SYNTAX_PROTO2 +} + +func (*Enum) XXX_MessageName() string { + return "google.protobuf.Enum" +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { + return fileDescriptor_type_345e3aff58b7b252, []int{3} +} +func (m *EnumValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *EnumValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValue.Merge(dst, src) +} +func (m *EnumValue) XXX_Size() int { + return m.Size() +} +func (m *EnumValue) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValue.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValue proto.InternalMessageInfo + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (*EnumValue) XXX_MessageName() string { + return "google.protobuf.EnumValue" +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. 
+ Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Option) Reset() { *m = Option{} } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { + return fileDescriptor_type_345e3aff58b7b252, []int{4} +} +func (m *Option) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Option) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Option.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Option) XXX_Merge(src proto.Message) { + xxx_messageInfo_Option.Merge(dst, src) +} +func (m *Option) XXX_Size() int { + return m.Size() +} +func (m *Option) XXX_DiscardUnknown() { + xxx_messageInfo_Option.DiscardUnknown(m) +} + +var xxx_messageInfo_Option proto.InternalMessageInfo + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func (*Option) XXX_MessageName() string { + return "google.protobuf.Option" +} +func init() { + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) +} +func (this *Type) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + if len(this.Oneofs) < len(that1.Oneofs) { + return -1 + } + return 1 + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + if this.Oneofs[i] < that1.Oneofs[i] { + return -1 + } + return 1 + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Field) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok 
{ + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Kind != that1.Kind { + if this.Kind < that1.Kind { + return -1 + } + return 1 + } + if this.Cardinality != that1.Cardinality { + if this.Cardinality < that1.Cardinality { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if this.OneofIndex != that1.OneofIndex { + if this.OneofIndex < that1.OneofIndex { + return -1 + } + return 1 + } + if this.Packed != that1.Packed { + if !this.Packed { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.JsonName != that1.JsonName { + if this.JsonName < that1.JsonName { + return -1 + } + return 1 + } + if this.DefaultValue != that1.DefaultValue { + if this.DefaultValue < that1.DefaultValue { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Enum) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + if len(this.Enumvalue) < len(that1.Enumvalue) { + return -1 + } + return 1 + } + for i := range this.Enumvalue { + if c := this.Enumvalue[i].Compare(that1.Enumvalue[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *EnumValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return 
c + } + return 0 +} +func (this *Option) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if c := this.Value.Compare(that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x Syntax) String() string { + s, ok := Syntax_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Kind) String() string { + s, ok := Field_Kind_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Cardinality) String() string { + s, ok := Field_Cardinality_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Type) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + return false + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Field) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Cardinality != that1.Cardinality { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != that1.Name { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if this.OneofIndex != that1.OneofIndex { + return false + } + if this.Packed != that1.Packed { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.JsonName != that1.JsonName { + return false + } + if this.DefaultValue != that1.DefaultValue { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Enum) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if 
this.Name != that1.Name { + return false + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + return false + } + for i := range this.Enumvalue { + if !this.Enumvalue[i].Equal(that1.Enumvalue[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *EnumValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Number != that1.Number { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Option) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Type) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Type{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Fields != nil { + s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") + } + s = append(s, "Oneofs: "+fmt.Sprintf("%#v", this.Oneofs)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Field) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Field{") + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "OneofIndex: "+fmt.Sprintf("%#v", this.OneofIndex)+",\n") + s = append(s, "Packed: "+fmt.Sprintf("%#v", this.Packed)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "JsonName: "+fmt.Sprintf("%#v", this.JsonName)+",\n") + s = append(s, "DefaultValue: "+fmt.Sprintf("%#v", this.DefaultValue)+",\n") + if 
this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Enum) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&types.Enum{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Enumvalue != nil { + s = append(s, "Enumvalue: "+fmt.Sprintf("%#v", this.Enumvalue)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&types.EnumValue{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Option) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Option{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringType(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Type) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Type) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Fields) > 0 { + for _, msg := range m.Fields { + dAtA[i] = 0x12 + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x22 + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.SourceContext != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintType(dAtA, i, uint64(m.SourceContext.Size())) + n1, err := m.SourceContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Syntax != 0 { + 
dAtA[i] = 0x30 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Kind != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Kind)) + } + if m.Cardinality != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Cardinality)) + } + if m.Number != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Number)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.TypeUrl) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl))) + i += copy(dAtA[i:], m.TypeUrl) + } + if m.OneofIndex != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintType(dAtA, i, uint64(m.OneofIndex)) + } + if m.Packed { + dAtA[i] = 0x40 + i++ + if m.Packed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x4a + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.JsonName) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.JsonName))) + i += copy(dAtA[i:], m.JsonName) + } + if len(m.DefaultValue) > 0 { + dAtA[i] = 0x5a + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue))) + i += copy(dAtA[i:], m.DefaultValue) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Enum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Enum) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Enumvalue) > 0 { + for _, msg := range m.Enumvalue { + dAtA[i] = 0x12 + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.SourceContext != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintType(dAtA, i, uint64(m.SourceContext.Size())) + n2, err := m.SourceContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Syntax != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *EnumValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = 
encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Number != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Option) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Option) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Value.Size())) + n3, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintType(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedType(r randyType, easy bool) *Type { + this := &Type{} + this.Name = string(randStringType(r)) + if r.Intn(10) != 0 { + v1 := r.Intn(5) + this.Fields = make([]*Field, v1) + for i := 0; i < v1; i++ { + this.Fields[i] = NewPopulatedField(r, easy) + } + } + v2 := r.Intn(10) + this.Oneofs = make([]string, v2) + for i := 0; i < v2; i++ { + this.Oneofs[i] = string(randStringType(r)) + } + if r.Intn(10) != 0 { + v3 := r.Intn(5) + this.Options = make([]*Option, v3) + for i := 0; i < v3; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(10) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 7) + } + return this +} + +func NewPopulatedField(r randyType, easy bool) *Field { + this := &Field{} + this.Kind = Field_Kind([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}[r.Intn(19)]) + this.Cardinality = Field_Cardinality([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + this.Name = string(randStringType(r)) + this.TypeUrl = string(randStringType(r)) + this.OneofIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.OneofIndex *= -1 + } + this.Packed = bool(bool(r.Intn(2) == 0)) + if r.Intn(10) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.JsonName = string(randStringType(r)) + this.DefaultValue = string(randStringType(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 12) + } + return this +} + +func NewPopulatedEnum(r randyType, easy bool) *Enum { + this := &Enum{} + this.Name = string(randStringType(r)) + if r.Intn(10) != 0 { + v5 := r.Intn(5) + this.Enumvalue = make([]*EnumValue, v5) + for i := 0; i < v5; i++ { + this.Enumvalue[i] = NewPopulatedEnumValue(r, easy) + } + } + if r.Intn(10) != 0 { + v6 := r.Intn(5) + this.Options = make([]*Option, v6) + for i := 0; i < v6; i++ { + this.Options[i] = 
NewPopulatedOption(r, easy) + } + } + if r.Intn(10) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 6) + } + return this +} + +func NewPopulatedEnumValue(r randyType, easy bool) *EnumValue { + this := &EnumValue{} + this.Name = string(randStringType(r)) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + if r.Intn(10) != 0 { + v7 := r.Intn(5) + this.Options = make([]*Option, v7) + for i := 0; i < v7; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 4) + } + return this +} + +func NewPopulatedOption(r randyType, easy bool) *Option { + this := &Option{} + this.Name = string(randStringType(r)) + if r.Intn(10) != 0 { + this.Value = NewPopulatedAny(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 3) + } + return this +} + +type randyType interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneType(r randyType) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringType(r randyType) string { + v8 := r.Intn(100) + tmps := make([]rune, v8) + for i := 0; i < v8; i++ { + tmps[i] = randUTF8RuneType(r) + } + return string(tmps) +} +func randUnrecognizedType(r randyType, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldType(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldType(dAtA []byte, r randyType, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + v9 := r.Int63() + if r.Intn(2) == 0 { + v9 *= -1 + } + dAtA = encodeVarintPopulateType(dAtA, uint64(v9)) + case 1: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateType(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateType(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Type) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + l = len(s) + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n 
+= 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Field) Size() (n int) { + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovType(uint64(m.Kind)) + } + if m.Cardinality != 0 { + n += 1 + sovType(uint64(m.Cardinality)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.OneofIndex != 0 { + n += 1 + sovType(uint64(m.OneofIndex)) + } + if m.Packed { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + l = len(m.JsonName) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Enum) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Enumvalue) > 0 { + for _, e := range m.Enumvalue { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnumValue) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Option) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovType(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozType(x uint64) (n int) { + return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Type) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Type{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Field", "Field", 1) + `,`, + `Oneofs:` + fmt.Sprintf("%v", this.Oneofs) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Field) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Field{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`, + 
`Packed:` + fmt.Sprintf("%v", this.Packed) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`, + `DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Enum) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Enum{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Enumvalue:` + strings.Replace(fmt.Sprintf("%v", this.Enumvalue), "EnumValue", "EnumValue", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *EnumValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnumValue{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Option) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Option{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Any", "Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringType(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Type) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Type: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Type: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) 
& 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Oneofs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Oneofs = append(m.Oneofs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= (Syntax(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= (Field_Kind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= (Field_Cardinality(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OneofIndex", wireType) + } + m.OneofIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OneofIndex |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Packed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Packed = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JsonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JsonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Enum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Enum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Enum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enumvalue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enumvalue = append(m.Enumvalue, &EnumValue{}) + if err := m.Enumvalue[len(m.Enumvalue)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= (Syntax(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnumValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnumValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnumValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Option) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Option: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Option: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &Any{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipType(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthType + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipType(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowType = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_type_345e3aff58b7b252) } + +var fileDescriptor_type_345e3aff58b7b252 = []byte{ + // 844 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46, + 0x14, 0x66, 0x41, 0xc8, 0xe8, 0x61, 0xf0, 0x66, 0x93, 0x49, 0x14, 0x67, 0x46, 0x65, 0x68, 0x0f, + 0x4c, 0x0e, 0x78, 0x0a, 0x1e, 0x4f, 0xaf, 0x60, 0x64, 0xca, 0x98, 0x48, 0xea, 0x22, 0x9a, 0xb8, + 0x17, 0x06, 0x83, 0x9c, 0x21, 0x11, 0x2b, 0x06, 0x89, 0xd6, 0xdc, 0x7a, 0xeb, 0xa9, 0xff, 0x44, + 0x4f, 0x9d, 0x9e, 0xfb, 0x47, 0xf8, 0x98, 0x63, 0x8f, 0x35, 0xb9, 0xf4, 0x98, 0x63, 0x6e, 0xed, + 0xec, 0x0a, 0x64, 0xf1, 0xa3, 0x33, 0x6e, 0x73, 0xe3, 0x7d, 0xdf, 0xf7, 0x7e, 0xee, 0xd3, 0x03, + 0x0e, 0x5f, 0x7b, 0xde, 0x6b, 0xd7, 0x39, 0x9a, 0x4c, 0xbd, 0xc0, 0xbb, 0x9c, 0x5d, 0x1d, 0x05, + 0xf3, 0x89, 0x53, 0x16, 0x16, 0x39, 0x08, 0xb9, 0xf2, 0x8a, 0x3b, 0x7c, 0xba, 0x29, 0xee, 0xb3, + 0x79, 0xc8, 0x1e, 0x7e, 0xb1, 0x49, 0xf9, 0xde, 0x6c, 0x3a, 0x70, 0x7a, 0x03, 0x8f, 0x05, 0xce, + 0x75, 0x10, 0xaa, 0x8a, 0x3f, 0x27, 0x41, 0xb2, 0xe7, 0x13, 0x87, 0x10, 0x90, 0x58, 0x7f, 0xec, + 0xa8, 0xa8, 0x80, 0x4a, 0x0a, 0x15, 0xbf, 0x49, 0x19, 0xe4, 0xab, 0x91, 0xe3, 0x0e, 0x7d, 0x35, + 0x59, 0x48, 0x95, 0xb2, 0x95, 0xc7, 0xe5, 0x8d, 0xfc, 0xe5, 0x33, 0x4e, 0xd3, 0xa5, 0x8a, 0x3c, + 0x06, 0xd9, 0x63, 0x8e, 0x77, 0xe5, 0xab, 0xa9, 0x42, 0xaa, 0xa4, 0xd0, 0xa5, 0x45, 0xbe, 0x84, + 0x3d, 0x6f, 0x12, 0x8c, 0x3c, 0xe6, 0xab, 0x92, 0x08, 0xf4, 0x64, 
0x2b, 0x90, 0x29, 0x78, 0xba, + 0xd2, 0x11, 0x1d, 0xf2, 0xeb, 0xf5, 0xaa, 0xe9, 0x02, 0x2a, 0x65, 0x2b, 0xda, 0x96, 0x67, 0x47, + 0xc8, 0x4e, 0x43, 0x15, 0xcd, 0xf9, 0x71, 0x93, 0x1c, 0x81, 0xec, 0xcf, 0x59, 0xd0, 0xbf, 0x56, + 0xe5, 0x02, 0x2a, 0xe5, 0x77, 0x24, 0xee, 0x08, 0x9a, 0x2e, 0x65, 0xc5, 0xdf, 0x65, 0x48, 0x8b, + 0xa6, 0xc8, 0x11, 0x48, 0x6f, 0x47, 0x6c, 0x28, 0x06, 0x92, 0xaf, 0x3c, 0xdb, 0xdd, 0x7a, 0xf9, + 0x7c, 0xc4, 0x86, 0x54, 0x08, 0x49, 0x03, 0xb2, 0x83, 0xfe, 0x74, 0x38, 0x62, 0x7d, 0x77, 0x14, + 0xcc, 0xd5, 0xa4, 0xf0, 0x2b, 0xfe, 0x8b, 0xdf, 0xe9, 0x9d, 0x92, 0xc6, 0xdd, 0xf8, 0x0c, 0xd9, + 0x6c, 0x7c, 0xe9, 0x4c, 0xd5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0x45, 0xef, 0x23, 0xc5, 0xde, + 0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x57, 0xf4, 0xa7, 0xd0, 0x3d, 0x6e, 0x77, 0xa7, + 0x2e, 0xf9, 0x0c, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0x3a, 0xd7, 0xea, 0x9e, 0x88, 0x05, 0x02, + 0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0x9d, 0xa1, 0x9a, 0x29, 0xa0, 0x52, 0x86, 0x2e, + 0xad, 0xf8, 0x5b, 0x29, 0xf7, 0x7c, 0xab, 0x67, 0xa0, 0xbc, 0xf1, 0x3d, 0xd6, 0x13, 0xf5, 0x81, + 0xa8, 0x23, 0xc3, 0x01, 0x83, 0xd7, 0xf8, 0x39, 0xe4, 0x86, 0xce, 0x55, 0x7f, 0xe6, 0x06, 0xbd, + 0xef, 0xfb, 0xee, 0xcc, 0x51, 0xb3, 0x42, 0xb0, 0xbf, 0x04, 0xbf, 0xe5, 0x58, 0xf1, 0x26, 0x09, + 0x12, 0x9f, 0x24, 0xc1, 0xb0, 0x6f, 0x5f, 0x58, 0x7a, 0xaf, 0x6b, 0x9c, 0x1b, 0xe6, 0x4b, 0x03, + 0x27, 0xc8, 0x01, 0x64, 0x05, 0xd2, 0x30, 0xbb, 0xf5, 0xb6, 0x8e, 0x11, 0xc9, 0x03, 0x08, 0xe0, + 0xac, 0x6d, 0xd6, 0x6c, 0x9c, 0x8c, 0xec, 0x96, 0x61, 0x9f, 0x1c, 0xe3, 0x54, 0xe4, 0xd0, 0x0d, + 0x01, 0x29, 0x2e, 0xa8, 0x56, 0x70, 0x3a, 0xca, 0x71, 0xd6, 0x7a, 0xa5, 0x37, 0x4e, 0x8e, 0xb1, + 0xbc, 0x8e, 0x54, 0x2b, 0x78, 0x8f, 0xe4, 0x40, 0x11, 0x48, 0xdd, 0x34, 0xdb, 0x38, 0x13, 0xc5, + 0xec, 0xd8, 0xb4, 0x65, 0x34, 0xb1, 0x12, 0xc5, 0x6c, 0x52, 0xb3, 0x6b, 0x61, 0x88, 0x22, 0xbc, + 0xd0, 0x3b, 0x9d, 0x5a, 0x53, 0xc7, 0xd9, 0x48, 0x51, 0xbf, 0xb0, 0xf5, 0x0e, 0xde, 0x5f, 0x2b, + 0xab, 0x5a, 0xc1, 0xb9, 0x28, 0x85, 0x6e, 0x74, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x30, 0xc5, + 0xaa, 0x88, 0x83, 0x0d, 0xe8, 0xe4, 0x18, 0xe3, 0xbb, 0x42, 0xc2, 0x28, 0x0f, 0xd6, 0x80, 0x93, + 0x63, 0x4c, 0x8a, 0x01, 0x64, 0x63, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xd3, 0x1a, 0x6d, 0xb4, 0x8c, + 0x5a, 0xbb, 0x65, 0x5f, 0xc4, 0xe6, 0xaa, 0xc2, 0xa3, 0x38, 0x61, 0x5a, 0x76, 0xcb, 0x34, 0x6a, + 0x6d, 0x8c, 0x36, 0x19, 0xaa, 0x7f, 0xd3, 0x6d, 0x51, 0xbd, 0x81, 0x93, 0xdb, 0x8c, 0xa5, 0xd7, + 0x6c, 0xbd, 0x81, 0x53, 0xc5, 0xbf, 0x11, 0x48, 0x3a, 0x9b, 0x8d, 0x77, 0x9e, 0x91, 0xaf, 0x40, + 0x71, 0xd8, 0x6c, 0x1c, 0x3e, 0x7f, 0x78, 0x49, 0x0e, 0xb7, 0x96, 0x8a, 0x7b, 0x8b, 0x65, 0xa0, + 0x77, 0xe2, 0xf8, 0x32, 0xa6, 0xfe, 0xf7, 0xe1, 0x90, 0x3e, 0xed, 0x70, 0xa4, 0xef, 0x77, 0x38, + 0xde, 0x80, 0x12, 0xb5, 0xb0, 0x73, 0x0a, 0x77, 0x1f, 0x76, 0x72, 0xed, 0xc3, 0xfe, 0xef, 0x3d, + 0x16, 0xbf, 0x06, 0x39, 0x84, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6, + 0xc2, 0xd5, 0xd8, 0x9c, 0x86, 0x92, 0xe7, 0x65, 0x90, 0xc3, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x0c, + 0xbb, 0xf6, 0xaa, 0x67, 0x51, 0xd3, 0x36, 0x2b, 0x38, 0xb1, 0x09, 0x55, 0x31, 0xaa, 0xff, 0x84, + 0xde, 0xdd, 0x6a, 0x89, 0x3f, 0x6e, 0xb5, 0xc4, 0x87, 0x5b, 0x0d, 0x7d, 0xbc, 0xd5, 0xd0, 0x8f, + 0x0b, 0x0d, 0xfd, 0xba, 0xd0, 0xd0, 0xcd, 0x42, 0x43, 0xef, 0x16, 0x1a, 0xfa, 0x73, 0xa1, 0xa1, + 0xbf, 0x16, 0x5a, 0xe2, 0x03, 0xc7, 0xdf, 0x6b, 0xe8, 0xe6, 0xbd, 0x86, 0xe0, 0xe1, 0xc0, 0x1b, + 0x6f, 0x96, 0x51, 0x57, 0xf8, 0xff, 0x8e, 0xc5, 0x2d, 0x0b, 0x7d, 0x97, 0xe6, 0x87, 0xcb, 
0xff, + 0x88, 0xd0, 0x2f, 0xc9, 0x54, 0xd3, 0xaa, 0xff, 0x96, 0xd4, 0x9a, 0xa1, 0xdc, 0x5a, 0x55, 0xfd, + 0xd2, 0x71, 0xdd, 0x73, 0xe6, 0xfd, 0xc0, 0xb8, 0x9b, 0x7f, 0x29, 0x8b, 0x38, 0xd5, 0x7f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0x74, 0x97, 0x69, 0x12, 0x2f, 0x07, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go index 18b384e..9ec3c54 100644 --- a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go +++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -1,23 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: wrappers.proto - -/* -Package types is a generated protocol buffer package. - -It is generated from these files: - wrappers.proto - -It has these top-level messages: - DoubleValue - FloatValue - Int64Value - UInt64Value - Int32Value - UInt32Value - BoolValue - StringValue - BytesValue -*/ +// source: google/protobuf/wrappers.proto + package types import proto "github.com/gogo/protobuf/proto" @@ -29,7 +12,7 @@ import bytes "bytes" import strings "strings" import reflect "reflect" -import binary "encoding/binary" +import encoding_binary "encoding/binary" import io "io" @@ -49,13 +32,44 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // The JSON representation for `DoubleValue` is JSON number. type DoubleValue struct { // The double value. - Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *DoubleValue) Reset() { *m = DoubleValue{} } -func (*DoubleValue) ProtoMessage() {} -func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{0} } -func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(dst, src) +} +func (m *DoubleValue) XXX_Size() int { + return m.Size() +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo func (m *DoubleValue) GetValue() float64 { if m != nil { @@ -64,18 +78,53 @@ func (m *DoubleValue) GetValue() float64 { return 0 } +func (*DoubleValue) XXX_MessageName() string { + return "google.protobuf.DoubleValue" +} + // Wrapper message for `float`. // // The JSON representation for `FloatValue` is JSON number. type FloatValue struct { // The float value. 
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(dst, src) +} +func (m *FloatValue) XXX_Size() int { + return m.Size() +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) } -func (m *FloatValue) Reset() { *m = FloatValue{} } -func (*FloatValue) ProtoMessage() {} -func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{1} } -func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +var xxx_messageInfo_FloatValue proto.InternalMessageInfo func (m *FloatValue) GetValue() float32 { if m != nil { @@ -84,18 +133,53 @@ func (m *FloatValue) GetValue() float32 { return 0 } +func (*FloatValue) XXX_MessageName() string { + return "google.protobuf.FloatValue" +} + // Wrapper message for `int64`. // // The JSON representation for `Int64Value` is JSON string. type Int64Value struct { // The int64 value. 
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{2} +} +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(dst, src) +} +func (m *Int64Value) XXX_Size() int { + return m.Size() +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) } -func (m *Int64Value) Reset() { *m = Int64Value{} } -func (*Int64Value) ProtoMessage() {} -func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{2} } -func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } +var xxx_messageInfo_Int64Value proto.InternalMessageInfo func (m *Int64Value) GetValue() int64 { if m != nil { @@ -104,18 +188,53 @@ func (m *Int64Value) GetValue() int64 { return 0 } +func (*Int64Value) XXX_MessageName() string { + return "google.protobuf.Int64Value" +} + // Wrapper message for `uint64`. // // The JSON representation for `UInt64Value` is JSON string. type UInt64Value struct { // The uint64 value. 
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{3} +} +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(dst, src) +} +func (m *UInt64Value) XXX_Size() int { + return m.Size() +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) } -func (m *UInt64Value) Reset() { *m = UInt64Value{} } -func (*UInt64Value) ProtoMessage() {} -func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{3} } -func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo func (m *UInt64Value) GetValue() uint64 { if m != nil { @@ -124,18 +243,53 @@ func (m *UInt64Value) GetValue() uint64 { return 0 } +func (*UInt64Value) XXX_MessageName() string { + return "google.protobuf.UInt64Value" +} + // Wrapper message for `int32`. // // The JSON representation for `Int32Value` is JSON number. type Int32Value struct { // The int32 value. 
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Int32Value) Reset() { *m = Int32Value{} } -func (*Int32Value) ProtoMessage() {} -func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{4} } -func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{4} +} +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(dst, src) +} +func (m *Int32Value) XXX_Size() int { + return m.Size() +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo func (m *Int32Value) GetValue() int32 { if m != nil { @@ -144,18 +298,53 @@ func (m *Int32Value) GetValue() int32 { return 0 } +func (*Int32Value) XXX_MessageName() string { + return "google.protobuf.Int32Value" +} + // Wrapper message for `uint32`. // // The JSON representation for `UInt32Value` is JSON number. type UInt32Value struct { // The uint32 value. 
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UInt32Value) Reset() { *m = UInt32Value{} } -func (*UInt32Value) ProtoMessage() {} -func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{5} } -func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{5} +} +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(dst, src) +} +func (m *UInt32Value) XXX_Size() int { + return m.Size() +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo func (m *UInt32Value) GetValue() uint32 { if m != nil { @@ -164,18 +353,53 @@ func (m *UInt32Value) GetValue() uint32 { return 0 } +func (*UInt32Value) XXX_MessageName() string { + return "google.protobuf.UInt32Value" +} + // Wrapper message for `bool`. // // The JSON representation for `BoolValue` is JSON `true` and `false`. type BoolValue struct { // The bool value. 
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{6} +} +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(dst, src) +} +func (m *BoolValue) XXX_Size() int { + return m.Size() +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) } -func (m *BoolValue) Reset() { *m = BoolValue{} } -func (*BoolValue) ProtoMessage() {} -func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{6} } -func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +var xxx_messageInfo_BoolValue proto.InternalMessageInfo func (m *BoolValue) GetValue() bool { if m != nil { @@ -184,18 +408,53 @@ func (m *BoolValue) GetValue() bool { return false } +func (*BoolValue) XXX_MessageName() string { + return "google.protobuf.BoolValue" +} + // Wrapper message for `string`. // // The JSON representation for `StringValue` is JSON string. type StringValue struct { // The string value. 
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *StringValue) Reset() { *m = StringValue{} } -func (*StringValue) ProtoMessage() {} -func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{7} } -func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{7} +} +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(dst, src) +} +func (m *StringValue) XXX_Size() int { + return m.Size() +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo func (m *StringValue) GetValue() string { if m != nil { @@ -204,18 +463,53 @@ func (m *StringValue) GetValue() string { return "" } +func (*StringValue) XXX_MessageName() string { + return "google.protobuf.StringValue" +} + // Wrapper message for `bytes`. // // The JSON representation for `BytesValue` is JSON string. type BytesValue struct { // The bytes value. 
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *BytesValue) Reset() { *m = BytesValue{} } -func (*BytesValue) ProtoMessage() {} -func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{8} } -func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_wrappers_b0966e4a6118a07f, []int{8} +} +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(dst, src) +} +func (m *BytesValue) XXX_Size() int { + return m.Size() +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo func (m *BytesValue) GetValue() []byte { if m != nil { @@ -224,6 +518,9 @@ func (m *BytesValue) GetValue() []byte { return nil } +func (*BytesValue) XXX_MessageName() string { + return "google.protobuf.BytesValue" +} func init() { proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") @@ -266,6 +563,9 @@ func (this *DoubleValue) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *FloatValue) Compare(that interface{}) int { @@ -299,6 +599,9 @@ func (this *FloatValue) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *Int64Value) Compare(that interface{}) int { @@ -332,6 +635,9 @@ func (this *Int64Value) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *UInt64Value) Compare(that interface{}) int { @@ -365,6 +671,9 @@ func (this *UInt64Value) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *Int32Value) Compare(that interface{}) int { @@ -398,6 +707,9 @@ func (this *Int32Value) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *UInt32Value) Compare(that interface{}) int { @@ -431,6 +743,9 @@ func (this *UInt32Value) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *BoolValue) Compare(that interface{}) int { @@ -464,6 +779,9 @@ func (this *BoolValue) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + 
return c + } return 0 } func (this *StringValue) Compare(that interface{}) int { @@ -497,6 +815,9 @@ func (this *StringValue) Compare(that interface{}) int { } return 1 } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *BytesValue) Compare(that interface{}) int { @@ -527,6 +848,9 @@ func (this *BytesValue) Compare(that interface{}) int { if c := bytes.Compare(this.Value, that1.Value); c != 0 { return c } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } return 0 } func (this *DoubleValue) Equal(that interface{}) bool { @@ -551,6 +875,9 @@ func (this *DoubleValue) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *FloatValue) Equal(that interface{}) bool { @@ -575,6 +902,9 @@ func (this *FloatValue) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Int64Value) Equal(that interface{}) bool { @@ -599,6 +929,9 @@ func (this *Int64Value) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *UInt64Value) Equal(that interface{}) bool { @@ -623,6 +956,9 @@ func (this *UInt64Value) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *Int32Value) Equal(that interface{}) bool { @@ -647,6 +983,9 @@ func (this *Int32Value) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *UInt32Value) Equal(that interface{}) bool { @@ -671,6 +1010,9 @@ func (this *UInt32Value) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *BoolValue) Equal(that interface{}) bool { @@ -695,6 +1037,9 @@ func (this *BoolValue) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *StringValue) Equal(that interface{}) bool { @@ -719,6 +1064,9 @@ func (this *StringValue) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *BytesValue) Equal(that interface{}) bool { @@ -743,6 +1091,9 @@ func (this *BytesValue) Equal(that interface{}) bool { if !bytes.Equal(this.Value, that1.Value) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *DoubleValue) GoString() string { @@ -752,6 +1103,9 @@ func (this *DoubleValue) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.DoubleValue{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -762,6 +1116,9 @@ func (this *FloatValue) 
GoString() string { s := make([]string, 0, 5) s = append(s, "&types.FloatValue{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -772,6 +1129,9 @@ func (this *Int64Value) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.Int64Value{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -782,6 +1142,9 @@ func (this *UInt64Value) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.UInt64Value{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -792,6 +1155,9 @@ func (this *Int32Value) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.Int32Value{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -802,6 +1168,9 @@ func (this *UInt32Value) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.UInt32Value{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -812,6 +1181,9 @@ func (this *BoolValue) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.BoolValue{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -822,6 +1194,9 @@ func (this *StringValue) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.StringValue{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -832,6 +1207,9 @@ func (this *BytesValue) GoString() string { s := make([]string, 0, 5) s = append(s, "&types.BytesValue{") s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } s = append(s, "}") return strings.Join(s, "") } @@ -861,9 +1239,12 @@ func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { if m.Value != 0 { dAtA[i] = 0x9 i++ - binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) i += 8 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -885,9 +1266,12 @@ func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { if m.Value != 0 { dAtA[i] = 0xd i++ - binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + encoding_binary.LittleEndian.PutUint32(dAtA[i:], 
uint32(math.Float32bits(float32(m.Value)))) i += 4 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -911,6 +1295,9 @@ func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -934,6 +1321,9 @@ func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -957,6 +1347,9 @@ func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -980,6 +1373,9 @@ func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1008,6 +1404,9 @@ func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1032,6 +1431,9 @@ func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1056,6 +1458,9 @@ func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -1075,6 +1480,7 @@ func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { this.Value *= -1 } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1086,6 +1492,7 @@ func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { this.Value *= -1 } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1097,6 +1504,7 @@ func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { this.Value *= -1 } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1105,6 +1513,7 @@ func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { this := &UInt64Value{} this.Value = uint64(uint64(r.Uint32())) if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1116,6 +1525,7 @@ func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { this.Value *= -1 } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1124,6 +1534,7 @@ func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { this := &UInt32Value{} this.Value = uint32(r.Uint32()) if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1132,6 +1543,7 @@ func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue { this := &BoolValue{} this.Value = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1140,6 +1552,7 @@ func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { this := &StringValue{} this.Value = 
string(randStringWrappers(r)) if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1152,6 +1565,7 @@ func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { this.Value[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedWrappers(r, 2) } return this } @@ -1234,6 +1648,9 @@ func (m *DoubleValue) Size() (n int) { if m.Value != 0 { n += 9 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1243,6 +1660,9 @@ func (m *FloatValue) Size() (n int) { if m.Value != 0 { n += 5 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1252,6 +1672,9 @@ func (m *Int64Value) Size() (n int) { if m.Value != 0 { n += 1 + sovWrappers(uint64(m.Value)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1261,6 +1684,9 @@ func (m *UInt64Value) Size() (n int) { if m.Value != 0 { n += 1 + sovWrappers(uint64(m.Value)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1270,6 +1696,9 @@ func (m *Int32Value) Size() (n int) { if m.Value != 0 { n += 1 + sovWrappers(uint64(m.Value)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1279,6 +1708,9 @@ func (m *UInt32Value) Size() (n int) { if m.Value != 0 { n += 1 + sovWrappers(uint64(m.Value)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1288,6 +1720,9 @@ func (m *BoolValue) Size() (n int) { if m.Value { n += 2 } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1298,6 +1733,9 @@ func (m *StringValue) Size() (n int) { if l > 0 { n += 1 + l + sovWrappers(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1308,6 +1746,9 @@ func (m *BytesValue) Size() (n int) { if l > 0 { n += 1 + l + sovWrappers(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -1330,6 +1771,7 @@ func (this *DoubleValue) String() string { } s := strings.Join([]string{`&DoubleValue{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1340,6 +1782,7 @@ func (this *FloatValue) String() string { } s := strings.Join([]string{`&FloatValue{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1350,6 +1793,7 @@ func (this *Int64Value) String() string { } s := strings.Join([]string{`&Int64Value{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1360,6 +1804,7 @@ func (this *UInt64Value) String() string { } s := strings.Join([]string{`&UInt64Value{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1370,6 +1815,7 @@ func (this *Int32Value) String() string { } s := strings.Join([]string{`&Int32Value{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1380,6 +1826,7 @@ func (this *UInt32Value) String() string { } s := strings.Join([]string{`&UInt32Value{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1390,6 +1837,7 @@ 
func (this *BoolValue) String() string { } s := strings.Join([]string{`&BoolValue{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1400,6 +1848,7 @@ func (this *StringValue) String() string { } s := strings.Join([]string{`&StringValue{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1410,6 +1859,7 @@ func (this *BytesValue) String() string { } s := strings.Join([]string{`&BytesValue{`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") return s @@ -1459,7 +1909,7 @@ func (m *DoubleValue) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 m.Value = float64(math.Float64frombits(v)) default: @@ -1474,6 +1924,7 @@ func (m *DoubleValue) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1520,7 +1971,7 @@ func (m *FloatValue) Unmarshal(dAtA []byte) error { if (iNdEx + 4) > l { return io.ErrUnexpectedEOF } - v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:])) iNdEx += 4 m.Value = float32(math.Float32frombits(v)) default: @@ -1535,6 +1986,7 @@ func (m *FloatValue) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1604,6 +2056,7 @@ func (m *Int64Value) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1673,6 +2126,7 @@ func (m *UInt64Value) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1742,6 +2196,7 @@ func (m *Int32Value) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1811,6 +2266,7 @@ func (m *UInt32Value) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1881,6 +2337,7 @@ func (m *BoolValue) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -1960,6 +2417,7 @@ func (m *StringValue) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -2041,6 +2499,7 @@ func (m *BytesValue) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -2155,26 +2614,29 @@ var ( ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("wrappers.proto", fileDescriptorWrappers) } - -var fileDescriptorWrappers = []byte{ - // 278 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x2f, 0x4a, 0x2c, - 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, - 0x52, 0xc3, 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, - 0x46, 0x0d, 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, - 0x1a, 0x26, 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, - 0x71, 0x87, 0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, - 0x55, 0xc4, 0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, - 0x39, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, - 0xb5, 0x18, 0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x76, 0xc6, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, - 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, - 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, - 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0xe4, 0x12, 0x4e, 0xce, 0xcf, 0xd5, - 0x43, 0x8b, 0x0e, 0x27, 0xde, 0x70, 0x68, 0x7c, 0x05, 0x80, 0x44, 0x02, 0x18, 0xa3, 0x58, 0x4b, - 0x2a, 0x0b, 0x52, 0x8b, 0x7f, 0x30, 0x32, 0x2e, 0x62, 0x62, 0x76, 0x0f, 0x70, 0x5a, 0xc5, 0x24, - 0xe7, 0x0e, 0xd1, 0x12, 0x00, 0xd5, 0xa2, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, - 0x17, 0x02, 0x52, 0x99, 0xc4, 0x06, 0x36, 0xcb, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x23, 0x27, - 0x6c, 0x5f, 0xfa, 0x01, 0x00, 0x00, +func init() { + proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_b0966e4a6118a07f) +} + +var fileDescriptor_wrappers_b0966e4a6118a07f = []byte{ + // 289 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x37, + 0xe3, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, + 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, + 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, + 0x31, 0x9e, 0x78, 0x2c, 0xc7, 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 
0x16, 0x25, 0x4e, 0xbc, + 0xe1, 0xd0, 0x38, 0x0b, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00, + 0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, + 0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xff, 0xc7, 0xfe, 0x37, 0x0e, 0x02, 0x00, + 0x00, } diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go index e173016..5aeb646 100644 --- a/vendor/google.golang.org/grpc/balancer.go +++ b/vendor/google.golang.org/grpc/balancer.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "net" "sync" @@ -118,26 +117,6 @@ type Balancer interface { Close() error } -// downErr implements net.Error. It is constructed by gRPC internals and passed to the down -// call of Balancer. -type downErr struct { - timeout bool - temporary bool - desc string -} - -func (e downErr) Error() string { return e.desc } -func (e downErr) Timeout() bool { return e.timeout } -func (e downErr) Temporary() bool { return e.temporary } - -func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { - return downErr{ - timeout: timeout, - temporary: temporary, - desc: fmt.Sprintf(format, a...), - } -} - // RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch // the name resolution updates and updates the addresses available correspondingly. // @@ -410,7 +389,3 @@ func (rr *roundRobin) Close() error { type pickFirst struct { *roundRobin } - -func pickFirstBalancerV1(r naming.Resolver) Balancer { - return &pickFirst{&roundRobin{r: r}} -} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f9d83c2..069feb1 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -143,7 +143,11 @@ type Builder interface { } // PickOptions contains addition information for the Pick operation. -type PickOptions struct{} +type PickOptions struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string +} // DoneInfo contains additional information for done. type DoneInfo struct { diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index f73b7d5..180d79d 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -63,31 +63,12 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { - // TODO: implement retries in clientStream and make this simply - // newClientStream, SendMsg, RecvMsg. - firstAttempt := true - for { - csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) 
- if err != nil { - return err - } - cs := csInt.(*clientStream) - if err := cs.SendMsg(req); err != nil { - if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { - // TODO: Add a field to header for grpc-transparent-retry-attempts - firstAttempt = false - continue - } - return err - } - if err := cs.RecvMsg(reply); err != nil { - if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { - // TODO: Add a field to header for grpc-transparent-retry-attempts - firstAttempt = false - continue - } - return err - } - return nil + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) } diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 84ba9e5..318ac40 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -26,6 +26,7 @@ import ( "reflect" "strings" "sync" + "sync/atomic" "time" "golang.org/x/net/context" @@ -36,16 +37,14 @@ import ( "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. - "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - "google.golang.org/grpc/transport" ) const ( @@ -66,8 +65,6 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errConnUnavailable indicates that the connection is unavailable. - errConnUnavailable = errors.New("grpc: the connection is unavailable") // errBalancerClosed indicates that the balancer is closed. errBalancerClosed = errors.New("grpc: balancer is closed") // We use an accessor so that minConnectTimeout can be @@ -90,351 +87,16 @@ var ( // errCredentialsConflict indicates that grpc.WithTransportCredentials() // and grpc.WithInsecure() are both called for a connection. errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") - // errNetworkIO indicates that the connection is down due to some network I/O error. - errNetworkIO = errors.New("grpc: failed with network I/O error") ) -// dialOptions configure a Dial call. dialOptions are set by the DialOption -// values passed to Dial. -type dialOptions struct { - unaryInt UnaryClientInterceptor - streamInt StreamClientInterceptor - cp Compressor - dc Decompressor - bs backoff.Strategy - block bool - insecure bool - timeout time.Duration - scChan <-chan ServiceConfig - copts transport.ConnectOptions - callOptions []CallOption - // This is used by v1 balancer dial option WithBalancer to support v1 - // balancer, and also by WithBalancerName dial option. - balancerBuilder balancer.Builder - // This is to support grpclb. 
- resolverBuilder resolver.Builder - waitForHandshake bool - channelzParentID int64 - disableServiceConfig bool -} - const ( defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 ) -// RegisterChannelz turns on channelz service. -// This is an EXPERIMENTAL API. -func RegisterChannelz() { - channelz.TurnOn() -} - -// DialOption configures how we set up the connection. -type DialOption func(*dialOptions) - -// WithWaitForHandshake blocks until the initial settings frame is received from the -// server before assigning RPCs to the connection. -// Experimental API. -func WithWaitForHandshake() DialOption { - return func(o *dialOptions) { - o.waitForHandshake = true - } -} - -// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched -// before doing a write on the wire. -func WithWriteBufferSize(s int) DialOption { - return func(o *dialOptions) { - o.copts.WriteBufferSize = s - } -} - -// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for each read syscall. -func WithReadBufferSize(s int) DialOption { - return func(o *dialOptions) { - o.copts.ReadBufferSize = s - } -} - -// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func WithInitialWindowSize(s int32) DialOption { - return func(o *dialOptions) { - o.copts.InitialWindowSize = s - } -} - -// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. -// The lower bound for window size is 64K and any value smaller than that will be ignored. -func WithInitialConnWindowSize(s int32) DialOption { - return func(o *dialOptions) { - o.copts.InitialConnWindowSize = s - } -} - -// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. -// -// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. -func WithMaxMsgSize(s int) DialOption { - return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) -} - -// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. -func WithDefaultCallOptions(cos ...CallOption) DialOption { - return func(o *dialOptions) { - o.callOptions = append(o.callOptions, cos...) - } -} - -// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. -// -// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead. -func WithCodec(c Codec) DialOption { - return WithDefaultCallOptions(CallCustomCodec(c)) -} - -// WithCompressor returns a DialOption which sets a Compressor to use for -// message compression. It has lower priority than the compressor set by -// the UseCompressor CallOption. -// -// Deprecated: use UseCompressor instead. -func WithCompressor(cp Compressor) DialOption { - return func(o *dialOptions) { - o.cp = cp - } -} - -// WithDecompressor returns a DialOption which sets a Decompressor to use for -// incoming message decompression. If incoming response messages are encoded -// using the decompressor's Type(), it will be used. 
Otherwise, the message -// encoding will be used to look up the compressor registered via -// encoding.RegisterCompressor, which will then be used to decompress the -// message. If no compressor is registered for the encoding, an Unimplemented -// status error will be returned. -// -// Deprecated: use encoding.RegisterCompressor instead. -func WithDecompressor(dc Decompressor) DialOption { - return func(o *dialOptions) { - o.dc = dc - } -} - -// WithBalancer returns a DialOption which sets a load balancer with the v1 API. -// Name resolver will be ignored if this DialOption is specified. -// -// Deprecated: use the new balancer APIs in balancer package and WithBalancerName. -func WithBalancer(b Balancer) DialOption { - return func(o *dialOptions) { - o.balancerBuilder = &balancerWrapperBuilder{ - b: b, - } - } -} - -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// This is an EXPERIMENTAL API. -func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return func(o *dialOptions) { - o.balancerBuilder = builder - } -} - -// withResolverBuilder is only for grpclb. -func withResolverBuilder(b resolver.Builder) DialOption { - return func(o *dialOptions) { - o.resolverBuilder = b - } -} - -// WithServiceConfig returns a DialOption which has a channel to read the service configuration. -// -// Deprecated: service config should be received through name resolver, as specified here. -// https://github.com/grpc/grpc/blob/master/doc/service_config.md -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return func(o *dialOptions) { - o.scChan = c - } -} - -// WithBackoffMaxDelay configures the dialer to use the provided maximum delay -// when backing off after failed connection attempts. -func WithBackoffMaxDelay(md time.Duration) DialOption { - return WithBackoffConfig(BackoffConfig{MaxDelay: md}) -} - -// WithBackoffConfig configures the dialer to use the provided backoff -// parameters after connection failures. -// -// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up -// for use. -func WithBackoffConfig(b BackoffConfig) DialOption { - - return withBackoff(backoff.Exponential{ - MaxDelay: b.MaxDelay, - }) -} - -// withBackoff sets the backoff strategy used for connectRetryNum after a -// failed connection attempt. -// -// This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoff.Strategy) DialOption { - return func(o *dialOptions) { - o.bs = bs - } -} - -// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying -// connection is up. Without this, Dial returns immediately and connecting the server -// happens in background. -func WithBlock() DialOption { - return func(o *dialOptions) { - o.block = true - } -} - -// WithInsecure returns a DialOption which disables transport security for this ClientConn. -// Note that transport security is required unless WithInsecure is set. 
-func WithInsecure() DialOption { - return func(o *dialOptions) { - o.insecure = true - } -} - -// WithTransportCredentials returns a DialOption which configures a -// connection level security credentials (e.g., TLS/SSL). -func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { - return func(o *dialOptions) { - o.copts.TransportCredentials = creds - } -} - -// WithPerRPCCredentials returns a DialOption which sets -// credentials and places auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { - return func(o *dialOptions) { - o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) - } -} - -// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn -// initially. This is valid if and only if WithBlock() is present. -// -// Deprecated: use DialContext and context.WithTimeout instead. -func WithTimeout(d time.Duration) DialOption { - return func(o *dialOptions) { - o.timeout = d - } -} - -func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { - return func(o *dialOptions) { - o.copts.Dialer = f - } -} - -func init() { - internal.WithContextDialer = withContextDialer - internal.WithResolverBuilder = withResolverBuilder -} - -// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. -// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's -// Temporary() method to decide if it should try to reconnect to the network address. -func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { - return withContextDialer( - func(ctx context.Context, addr string) (net.Conn, error) { - if deadline, ok := ctx.Deadline(); ok { - return f(addr, deadline.Sub(time.Now())) - } - return f(addr, 0) - }) -} - -// WithStatsHandler returns a DialOption that specifies the stats handler -// for all the RPCs and underlying network connections in this ClientConn. -func WithStatsHandler(h stats.Handler) DialOption { - return func(o *dialOptions) { - o.copts.StatsHandler = h - } -} - -// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. -// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network -// address and won't try to reconnect. -// The default value of FailOnNonTempDialError is false. -// This is an EXPERIMENTAL API. -func FailOnNonTempDialError(f bool) DialOption { - return func(o *dialOptions) { - o.copts.FailOnNonTempDialError = f - } -} - -// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. -func WithUserAgent(s string) DialOption { - return func(o *dialOptions) { - o.copts.UserAgent = s - } -} - -// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. -func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { - return func(o *dialOptions) { - o.copts.KeepaliveParams = kp - } -} - -// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. -func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { - return func(o *dialOptions) { - o.unaryInt = f - } -} - -// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs. 
-func WithStreamInterceptor(f StreamClientInterceptor) DialOption { - return func(o *dialOptions) { - o.streamInt = f - } -} - -// WithAuthority returns a DialOption that specifies the value to be used as -// the :authority pseudo-header. This value only works with WithInsecure and -// has no effect if TransportCredentials are present. -func WithAuthority(a string) DialOption { - return func(o *dialOptions) { - o.copts.Authority = a - } -} - -// WithChannelzParentID returns a DialOption that specifies the channelz ID of current ClientConn's -// parent. This function is used in nested channel creation (e.g. grpclb dial). -func WithChannelzParentID(id int64) DialOption { - return func(o *dialOptions) { - o.channelzParentID = id - } -} - -// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any -// service config provided by the resolver and provides a hint to the resolver -// to not fetch service configs. -func WithDisableServiceConfig() DialOption { - return func(o *dialOptions) { - o.disableServiceConfig = true - } -} - // Dial creates a client connection to the given target. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) @@ -458,23 +120,25 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), blockingpicker: newPickerWrapper(), + czData: new(channelzData), } + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.ctx, cc.cancel = context.WithCancel(context.Background()) for _, opt := range opts { - opt(&cc.dopts) + opt.apply(&cc.dopts) } if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(cc, cc.dopts.channelzParentID, target) + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) } else { - cc.channelzID = channelz.RegisterChannel(cc, 0, target) + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) } } @@ -567,8 +231,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName - } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { - cc.authority = cc.dopts.copts.Authority + } else if cc.dopts.insecure && cc.dopts.authority != "" { + cc.authority = cc.dopts.authority } else { // Use endpoint from "scheme://authority/endpoint" as the default // authority for ClientConn. @@ -620,6 +284,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * s := cc.GetState() if s == connectivity.Ready { break + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.blockingpicker.connectionError(); err != nil { + terr, ok := err.(interface{ Temporary() bool }) + if ok && !terr.Temporary() { + return nil, err + } + } } if !cc.WaitForStateChange(ctx, s) { // ctx got timeout or canceled. @@ -699,13 +370,10 @@ type ClientConn struct { preBalancerName string // previous balancer name. 
curAddresses []resolver.Address balancerWrapper *ccBalancerWrapper + retryThrottler atomic.Value - channelzID int64 // channelz unique identification number - czmu sync.RWMutex - callsStarted int64 - callsSucceeded int64 - callsFailed int64 - lastCallStartedTime time.Time + channelzID int64 // channelz unique identification number + czData *channelzData } // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or @@ -830,8 +498,6 @@ func (cc *ClientConn) switchBalancer(name string) { if cc.balancerWrapper != nil { cc.balancerWrapper.close() } - // Clear all stickiness state. - cc.blockingpicker.clearStickinessState() builder := balancer.Get(name) if builder == nil { @@ -860,9 +526,11 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi // Caller needs to make sure len(addrs) > 0. func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { ac := &addrConn{ - cc: cc, - addrs: addrs, - dopts: cc.dopts, + cc: cc, + addrs: addrs, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -892,40 +560,34 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { ac.tearDown(err) } -// ChannelzMetric returns ChannelInternalMetric of current ClientConn. -// This is an EXPERIMENTAL API. -func (cc *ClientConn) ChannelzMetric() *channelz.ChannelInternalMetric { - state := cc.GetState() - cc.czmu.RLock() - defer cc.czmu.RUnlock() +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { return &channelz.ChannelInternalMetric{ - State: state, + State: cc.GetState(), Target: cc.target, - CallsStarted: cc.callsStarted, - CallsSucceeded: cc.callsSucceeded, - CallsFailed: cc.callsFailed, - LastCallStartedTimestamp: cc.lastCallStartedTime, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), } } +// Target returns the target string of the ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) Target() string { + return cc.target +} + func (cc *ClientConn) incrCallsStarted() { - cc.czmu.Lock() - cc.callsStarted++ - // TODO(yuxuanli): will make this a time.Time pointer improve performance? 
- cc.lastCallStartedTime = time.Now() - cc.czmu.Unlock() + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) } func (cc *ClientConn) incrCallsSucceeded() { - cc.czmu.Lock() - cc.callsSucceeded++ - cc.czmu.Unlock() + atomic.AddInt64(&cc.czData.callsSucceeded, 1) } func (cc *ClientConn) incrCallsFailed() { - cc.czmu.Lock() - cc.callsFailed++ - cc.czmu.Unlock() + atomic.AddInt64(&cc.czData.callsFailed, 1) } // connect starts to creating transport and also starts the transport monitor @@ -1012,8 +674,10 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { return m } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { - t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{ + FullMethodName: method, + }) if err != nil { return nil, nil, toRPCErr(err) } @@ -1033,6 +697,19 @@ func (cc *ClientConn) handleServiceConfig(js string) error { cc.mu.Lock() cc.scRaw = js cc.sc = sc + + if sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: sc.retryThrottling.MaxTokens, + max: sc.retryThrottling.MaxTokens, + thresh: sc.retryThrottling.MaxTokens / 2, + ratio: sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config. if cc.curBalancerName == grpclbName { // If current balancer is grpclb, there's at least one grpclb @@ -1047,17 +724,6 @@ func (cc *ClientConn) handleServiceConfig(js string) error { } } - if envConfigStickinessOn { - var newStickinessMDKey string - if sc.stickinessMetadataKey != nil && *sc.stickinessMetadataKey != "" { - newStickinessMDKey = *sc.stickinessMetadataKey - } - // newStickinessMDKey is "" if one of the following happens: - // - stickinessMetadataKey is set to "" - // - stickinessMetadataKey field doesn't exist in service config - cc.blockingpicker.updateStickinessMDKey(strings.ToLower(newStickinessMDKey)) - } - cc.mu.Unlock() return nil } @@ -1072,6 +738,24 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { go r.resolveNow(o) } +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// This API is EXPERIMENTAL. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + defer cc.mu.Unlock() + for ac := range cc.conns { + ac.resetConnectBackoff() + } +} + // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { defer cc.cancel() @@ -1140,12 +824,10 @@ type addrConn struct { // negotiations must complete. 
connectDeadline time.Time - channelzID int64 // channelz unique identification number - czmu sync.RWMutex - callsStarted int64 - callsSucceeded int64 - callsFailed int64 - lastCallStartedTime time.Time + resetBackoff chan struct{} + + channelzID int64 // channelz unique identification number + czData *channelzData } // adjustParams updates parameters used to create transports upon @@ -1170,14 +852,6 @@ func (ac *addrConn) printf(format string, a ...interface{}) { } } -// errorf records an error in ac's event log, unless ac has been closed. -// REQUIRES ac.mu is held. -func (ac *addrConn) errorf(format string, a ...interface{}) { - if ac.events != nil { - ac.events.Errorf(format, a...) - } -} - // resetTransport recreates a transport to the address for ac. The old // transport will close itself on error or when the clientconn is closed. // The created transport must receive initial settings frame from the server. @@ -1208,6 +882,7 @@ func (ac *addrConn) resetTransport() error { ac.dopts.copts.KeepaliveParams = ac.cc.mkp ac.cc.mu.RUnlock() var backoffDeadline, connectDeadline time.Time + var resetBackoff chan struct{} for connectRetryNum := 0; ; connectRetryNum++ { ac.mu.Lock() if ac.backoffDeadline.IsZero() { @@ -1215,6 +890,7 @@ func (ac *addrConn) resetTransport() error { // or this is the first time this addrConn is trying to establish a // connection. backoffFor := ac.dopts.bs.Backoff(connectRetryNum) // time.Duration. + resetBackoff = ac.resetBackoff // This will be the duration that dial gets to finish. dialDuration := getMinConnectTimeout() if backoffFor > dialDuration { @@ -1248,7 +924,7 @@ func (ac *addrConn) resetTransport() error { copy(addrsIter, ac.addrs) copts := ac.dopts.copts ac.mu.Unlock() - connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts) + connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts, resetBackoff) if err != nil { return err } @@ -1260,7 +936,7 @@ func (ac *addrConn) resetTransport() error { // createTransport creates a connection to one of the backends in addrs. // It returns true if a connection was established. -func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) { +func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions, resetBackoff chan struct{}) (bool, error) { for i := ridx; i < len(addrs); i++ { addr := addrs[i] target := transport.TargetInfo{ @@ -1311,7 +987,7 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, // Didn't receive server preface, must kill this new transport now. 
grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.") newTr.Close() - break + continue case <-ac.ctx.Done(): } } @@ -1360,6 +1036,8 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, timer := time.NewTimer(backoffDeadline.Sub(time.Now())) select { case <-timer.C: + case <-resetBackoff: + timer.Stop() case <-ac.ctx.Done(): timer.Stop() return false, ac.ctx.Err() @@ -1367,6 +1045,14 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, return false, nil } +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.resetBackoff = make(chan struct{}) + ac.connectRetryNum = 0 + ac.mu.Unlock() +} + // Run in a goroutine to track the error in transport and create the // new transport if an error happens. It returns when the channel is closing. func (ac *addrConn) transportMonitor() { @@ -1447,46 +1133,6 @@ func (ac *addrConn) transportMonitor() { } } -// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or -// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. -func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { - for { - ac.mu.Lock() - switch { - case ac.state == connectivity.Shutdown: - if failfast || !hasBalancer { - // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. - err := ac.tearDownErr - ac.mu.Unlock() - return nil, err - } - ac.mu.Unlock() - return nil, errConnClosing - case ac.state == connectivity.Ready: - ct := ac.transport - ac.mu.Unlock() - return ct, nil - case ac.state == connectivity.TransientFailure: - if failfast || hasBalancer { - ac.mu.Unlock() - return nil, errConnUnavailable - } - } - ready := ac.ready - if ready == nil { - ready = make(chan struct{}) - ac.ready = ready - } - ac.mu.Unlock() - select { - case <-ctx.Done(): - return nil, toRPCErr(ctx.Err()) - // Wait until the new transport is ready or failed. - case <-ready: - } - } -} - // getReadyTransport returns the transport if ac's state is READY. // Otherwise it returns nil, false. // If ac's state is IDLE, it will trigger ac to connect. 
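One user-visible addition in this revision of clientconn.go is ClientConn.ResetConnectBackoff, shown earlier in this hunk. The following is a minimal usage sketch and not part of the patch: the target address and the networkUp channel are placeholders standing in for whatever connectivity signal the host application actually has.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Dial lazily; without WithBlock the connection is established in the background.
	conn, err := grpc.Dial("example.com:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// networkUp is a hypothetical stand-in for an OS- or application-level
	// notification that a previously unavailable network came back.
	networkUp := make(chan struct{})
	go func() {
		time.Sleep(5 * time.Second)
		close(networkUp)
	}()

	<-networkUp
	// Wake all subchannels in TRANSIENT_FAILURE and reset their backoff so they
	// retry immediately (EXPERIMENTAL API in this vendored gRPC version).
	conn.ResetConnectBackoff()
}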
@@ -1551,47 +1197,76 @@ func (ac *addrConn) getState() connectivity.State { return ac.state } -func (ac *addrConn) getCurAddr() (ret resolver.Address) { - ac.mu.Lock() - ret = ac.curAddr - ac.mu.Unlock() - return -} - func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { ac.mu.Lock() addr := ac.curAddr.Addr ac.mu.Unlock() - state := ac.getState() - ac.czmu.RLock() - defer ac.czmu.RUnlock() return &channelz.ChannelInternalMetric{ - State: state, + State: ac.getState(), Target: addr, - CallsStarted: ac.callsStarted, - CallsSucceeded: ac.callsSucceeded, - CallsFailed: ac.callsFailed, - LastCallStartedTimestamp: ac.lastCallStartedTime, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), } } func (ac *addrConn) incrCallsStarted() { - ac.czmu.Lock() - ac.callsStarted++ - ac.lastCallStartedTime = time.Now() - ac.czmu.Unlock() + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) } func (ac *addrConn) incrCallsSucceeded() { - ac.czmu.Lock() - ac.callsSucceeded++ - ac.czmu.Unlock() + atomic.AddInt64(&ac.czData.callsSucceeded, 1) } func (ac *addrConn) incrCallsFailed() { - ac.czmu.Lock() - ac.callsFailed++ - ac.czmu.Unlock() + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. +func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() } // ErrClientConnTimeout indicates that the ClientConn cannot establish the diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 3351bf0..1dae57a 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -31,6 +31,7 @@ import ( "net" "strings" + "github.com/golang/protobuf/proto" "golang.org/x/net/context" ) @@ -118,6 +119,18 @@ func (t TLSInfo) AuthType() string { return "tls" } +// GetChannelzSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetChannelzSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + // tlsCreds is the credentials required for authenticating a connection using TLS. 
type tlsCreds struct { // TLS configuration @@ -155,7 +168,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon case <-ctx.Done(): return nil, nil, ctx.Err() } - return conn, TLSInfo{conn.ConnectionState()}, nil + return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { @@ -163,7 +176,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if err := conn.Handshake(); err != nil { return nil, nil, err } - return conn, TLSInfo{conn.ConnectionState()}, nil + return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil } func (c *tlsCreds) Clone() TransportCredentials { @@ -218,3 +231,63 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error } return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil } + +// ChannelzSecurityInfo defines the interface that security protocols should implement +// in order to provide security info to channelz. +type ChannelzSecurityInfo interface { + GetSecurityValue() ChannelzSecurityValue +} + +// ChannelzSecurityValue defines the interface that GetSecurityValue() return value +// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue +// and *OtherChannelzSecurityValue. +type ChannelzSecurityValue interface { + isChannelzSecurityValue() +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +type TLSChannelzSecurityValue struct { + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {} + +// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return +// from GetSecurityValue(), which contains protocol specific security info. Note +// the Value field will be sent to users of channelz requesting channel info, and +// thus sensitive info should better be avoided. 
+type OtherChannelzSecurityValue struct { + Name string + Value proto.Message +} + +func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {} + +type tlsConn struct { + *tls.Conn + rawConn net.Conn +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/go16.go similarity index 100% rename from vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go rename to vendor/google.golang.org/grpc/credentials/go16.go diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/go17.go similarity index 98% rename from vendor/google.golang.org/grpc/credentials/credentials_util_go17.go rename to vendor/google.golang.org/grpc/credentials/go17.go index 60409aa..fbd5000 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go +++ b/vendor/google.golang.org/grpc/credentials/go17.go @@ -1,5 +1,4 @@ -// +build go1.7 -// +build !go1.8 +// +build go1.7,!go1.8 /* * diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/go18.go similarity index 65% rename from vendor/google.golang.org/grpc/credentials/credentials_util_go18.go rename to vendor/google.golang.org/grpc/credentials/go18.go index 93f0e1d..db30d46 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go +++ b/vendor/google.golang.org/grpc/credentials/go18.go @@ -24,6 +24,14 @@ import ( "crypto/tls" ) +func init() { + cipherSuiteLookup[tls.TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256" + cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256" + cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256" + cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305" + cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305" +} + // cloneTLSConfig returns a shallow clone of the exported // 
fields of cfg, ignoring the unexported sync.Once, which // contains a mutex and must not be copied. diff --git a/vendor/google.golang.org/grpc/credentials/go19.go b/vendor/google.golang.org/grpc/credentials/go19.go new file mode 100644 index 0000000..2a4ca1a --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/go19.go @@ -0,0 +1,35 @@ +// +build go1.9,!appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "errors" + "syscall" +) + +// implements the syscall.Conn interface +func (c tlsConn) SyscallConn() (syscall.RawConn, error) { + conn, ok := c.rawConn.(syscall.Conn) + if !ok { + return nil, errors.New("RawConn does not implement syscall.Conn") + } + return conn.SyscallConn() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 0000000..3d3c9e2 --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,453 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "net" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + cp Compressor + dc Decompressor + bs backoff.Strategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + // This is to support grpclb. + resolverBuilder resolver.Builder + waitForHandshake bool + channelzParentID int64 + disableServiceConfig bool + disableRetry bool +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +// EmptyDialOption does not alter the dial configuration. 
It can be embedded in +// another structure to build custom dial options. +// +// This API is EXPERIMENTAL. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. +type funcDialOption struct { + f func(*dialOptions) +} + +func (fdo *funcDialOption) apply(do *dialOptions) { + fdo.f(do) +} + +func newFuncDialOption(f func(*dialOptions)) *funcDialOption { + return &funcDialOption{ + f: f, + } +} + +// WithWaitForHandshake blocks until the initial settings frame is received from +// the server before assigning RPCs to the connection. Experimental API. +func WithWaitForHandshake() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.waitForHandshake = true + }) +} + +// WithWriteBufferSize determines how much data can be batched before doing a +// write on the wire. The corresponding memory allocation for this buffer will +// be twice the size to keep syscalls low. The default value for this buffer is +// 32KB. +// +// Zero will disable the write buffer such that each write will be on underlying +// connection. Note: A Send call may not directly translate to a write. +func WithWriteBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.WriteBufferSize = s + }) +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how +// much data can be read at most for each read syscall. +// +// The default value for this buffer is 32KB. Zero will disable read buffer for +// a connection so data framer can access the underlying conn directly. +func WithReadBufferSize(s int) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.ReadBufferSize = s + }) +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial +// window size on a stream. The lower bound for window size is 64K and any value +// smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialWindowSize = s + }) +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for +// initial window size on a connection. The lower bound for window size is 64K +// and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + }) +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the +// client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default +// CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + }) +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and +// unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. 
+// +// Deprecated: use UseCompressor instead. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// +// Deprecated: use the new balancer APIs in balancer package and +// WithBalancerName. +func WithBalancer(b Balancer) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + }) +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// This is an EXPERIMENTAL API. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return newFuncDialOption(func(o *dialOptions) { + o.balancerBuilder = builder + }) +} + +// withResolverBuilder is only for grpclb. +func withResolverBuilder(b resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolverBuilder = b + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver, as +// specified here. +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + return withBackoff(backoff.Exponential{ + MaxDelay: b.MaxDelay, + }) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs backoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the +// underlying connection is up. 
Without this, Dial returns immediately and +// connecting the server happens in background. +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Note that transport security is required unless WithInsecure is +// set. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.insecure = true + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext and context.WithTimeout instead. +func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithContextDialer = withContextDialer + internal.WithResolverBuilder = withResolverBuilder +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return withContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, deadline.Sub(time.Now())) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.StatsHandler = h + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// This is an EXPERIMENTAL API. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. 
+func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header. This value only works with WithInsecure and has no +// effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +func WithChannelzParentID(id int64) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDisableRetry returns a DialOption that disables retries, even if the +// service config enables them. This does not impact transparent retries, which +// will happen automatically if no data is written to the wire or if the RPC is +// unprocessed by the remote server. +// +// Retry support is currently disabled by default, but will be enabled by +// default in the future. Until then, it may be enabled by setting the +// environment variable "GRPC_GO_RETRY" to "on". +// +// This API is EXPERIMENTAL. +func WithDisableRetry() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableRetry = true + }) +} + +// WithMaxHeaderListSize returns a DialOption that specifies the maximum +// (uncompressed) size of header list that the client is prepared to accept. +func WithMaxHeaderListSize(s uint32) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.MaxHeaderListSize = &s + }) +} + +func defaultDialOptions() dialOptions { + return dialOptions{ + disableRetry: !envconfig.Retry, + copts: transport.ConnectOptions{ + WriteBufferSize: defaultWriteBufSize, + ReadBufferSize: defaultReadBufSize, + }, + } +} diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go index 535ee93..b1db21a 100644 --- a/vendor/google.golang.org/grpc/go16.go +++ b/vendor/google.golang.org/grpc/go16.go @@ -28,8 +28,8 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" - "google.golang.org/grpc/transport" ) // dialContext connects to the address on the named network. 
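The new dialoptions.go above collects the DialOption constructors that previously lived in clientconn.go, including the retry and header-list-size options. A minimal sketch of how they compose follows; it is not part of the patch, and the target address and the chosen values are placeholders.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithInsecure(),                      // no transport security for the sketch
		grpc.WithBlock(),                         // block until connected or ctx expires
		grpc.WithBackoffMaxDelay(10*time.Second), // cap reconnect backoff
		grpc.WithKeepaliveParams(keepalive.ClientParameters{Time: 30 * time.Second}),
		grpc.WithMaxHeaderListSize(1<<20), // accept at most 1MB of response headers
		grpc.WithDisableRetry(),           // opt out of service-config retries
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}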
@@ -50,12 +50,13 @@ func toRPCErr(err error) error { if err == nil || err == io.EOF { return err } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } if _, ok := status.FromError(err); ok { return err } switch e := err.(type) { - case transport.StreamError: - return status.Error(e.Code, e.Desc) case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) default: diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go index ec676a9..71a72e8 100644 --- a/vendor/google.golang.org/grpc/go17.go +++ b/vendor/google.golang.org/grpc/go17.go @@ -29,8 +29,8 @@ import ( netctx "golang.org/x/net/context" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" - "google.golang.org/grpc/transport" ) // dialContext connects to the address on the named network. @@ -51,12 +51,13 @@ func toRPCErr(err error) error { if err == nil || err == io.EOF { return err } + if err == io.ErrUnexpectedEOF { + return status.Error(codes.Internal, err.Error()) + } if _, ok := status.FromError(err); ok { return err } switch e := err.(type) { - case transport.StreamError: - return status.Error(e.Code, e.Desc) case transport.ConnectionError: return status.Error(codes.Unavailable, e.Desc) default: diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 153d753..6fd6bb3 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -23,6 +23,7 @@ import ( "time" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" ) @@ -281,9 +282,9 @@ type SocketInternalMetric struct { RemoteAddr net.Addr // Optional, represents the name of the remote endpoint, if different than // the original target name. - RemoteName string - //TODO: socket options - //TODO: Security + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue } // Socket is the interface that should be satisfied in order to be tracked by diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 0000000..0721539 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,53 @@ +// +build !appengine,go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. 
+// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 0000000..884910c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,38 @@ +// +build !linux appengine !go1.7 + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "google.golang.org/grpc/grpclog" + +func init() { + grpclog.Infof("Channelz: socket options are not supported on non-linux os and appengine.") +} + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +// Windows OS doesn't support Socket Option +type SocketOptionData struct { +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +// Windows OS doesn't support Socket Option +func (s *SocketOptionData) Getsockopt(fd uintptr) {} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go new file mode 100644 index 0000000..e1e9e32 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go @@ -0,0 +1,39 @@ +// +build linux,go1.9,!appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" +) + +// GetSocketOption gets the socket option info of the conn. 
+func GetSocketOption(socket interface{}) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go new file mode 100644 index 0000000..1d4da95 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go @@ -0,0 +1,26 @@ +// +build !linux !go1.9 appengine + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c interface{}) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go similarity index 70% rename from vendor/google.golang.org/grpc/envconfig.go rename to vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index d50178e..3ee8740 100644 --- a/vendor/google.golang.org/grpc/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -16,7 +16,8 @@ * */ -package grpc +// Package envconfig contains grpc settings configured by environment variables. +package envconfig import ( "os" @@ -24,14 +25,11 @@ import ( ) const ( - envConfigPrefix = "GRPC_GO_" - envConfigStickinessStr = envConfigPrefix + "STICKINESS" + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" ) var ( - envConfigStickinessOn bool + // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". + Retry = strings.EqualFold(os.Getenv(retryStr), "on") ) - -func init() { - envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on") -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index cd34267..c35afb0 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -21,14 +21,6 @@ package internal var ( - - // TestingUseHandlerImpl enables the http.Handler-based server implementation. - // It must be called before Serve and requires TLS credentials. - // - // The provided grpcServer must be of type *grpc.Server. It is untyped - // for circular dependency reasons. 
- TestingUseHandlerImpl func(grpcServer interface{}) - // WithContextDialer is exported by clientconn.go WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption // WithResolverBuilder is exported by clientconn.go diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go similarity index 100% rename from vendor/google.golang.org/grpc/transport/bdp_estimator.go rename to vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go diff --git a/vendor/google.golang.org/grpc/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go similarity index 81% rename from vendor/google.golang.org/grpc/transport/controlbuf.go rename to vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 5c5891a..204ba15 100644 --- a/vendor/google.golang.org/grpc/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -104,7 +104,6 @@ type headerFrame struct { type cleanupStream struct { streamID uint32 - idPtr *uint32 rst bool rstCode http2.ErrCode onWrite func() @@ -138,9 +137,6 @@ type outgoingSettings struct { ss []http2.Setting } -type settingsAck struct { -} - type incomingGoAway struct { } @@ -229,6 +225,12 @@ func (l *outStreamList) dequeue() *outStream { return b } +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. type controlBuffer struct { ch chan struct{} done <-chan struct{} @@ -279,6 +281,21 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{ return true, nil } +// Note argument f should never be nil. +func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + c.mu.Unlock() + return true, nil +} + func (c *controlBuffer) get(block bool) (interface{}, error) { for { c.mu.Lock() @@ -335,13 +352,29 @@ const ( serverSide ) +// Loopy receives frames from the control buffer. +// Each frame is handled individually; most of the work done by loopy goes +// into handling data frames. Loopy maintains a queue of active streams, and each +// stream maintains a queue of data frames; as loopy receives data frames +// it gets added to the queue of the relevant stream. +// Loopy goes over this list of active streams by processing one node every iteration, +// thereby closely resemebling to a round-robin scheduling over all streams. While +// processing a stream, loopy writes out data bytes from this stream capped by the min +// of http2MaxFrameLen, connection-level flow control and stream-level flow control. type loopyWriter struct { - side side - cbuf *controlBuffer - sendQuota uint32 - oiws uint32 // outbound initial window size. - estdStreams map[uint32]*outStream // Established streams. - activeStreams *outStreamList // Streams that are sending data. + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. 
+ // estdStreams is map of all established streams that are not cleaned-up yet. + // On client-side, this is all streams whose headers were sent out. + // On server-side, this is all streams whose headers were received. + estdStreams map[uint32]*outStream // Established streams. + // activeStreams is a linked-list of all streams that have data to send and some + // stream-level flow control quota. + // Each of these streams internally have a list of data items(and perhaps trailers + // on the server-side) to be sent out. + activeStreams *outStreamList framer *framer hBuf *bytes.Buffer // The buffer for HPACK encoding. hEnc *hpack.Encoder // HPACK encoder. @@ -372,6 +405,21 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato const minBatchSize = 1000 // run should be run in a separate goroutine. +// It reads control frames from controlBuf and processes them by: +// 1. Updating loopy's internal state, or/and +// 2. Writing out HTTP2 frames on the wire. +// +// Loopy keeps all active streams with data to send in a linked-list. +// All streams in the activeStreams linked-list must have both: +// 1. Data to send, and +// 2. Stream level flow control quota available. +// +// In each iteration of run loop, other than processing the incoming control +// frame, loopy calls processData, which processes one node from the activeStreams linked-list. +// This results in writing of HTTP2 frames into an underlying write buffer. +// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. +// As an optimization, to increase the batch size for each flush, loopy yields the processor, once +// if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { defer func() { if err == ErrConnClosing { @@ -696,21 +744,30 @@ func (l *loopyWriter) applySettings(ss []http2.Setting) error { return nil } +// processData removes the first stream from active streams, writes out at most 16KB +// of its data and then puts it at the end of activeStreams if there's still more data +// to be sent and stream has some stream-level flow control. func (l *loopyWriter) processData() (bool, error) { if l.sendQuota == 0 { return true, nil } - str := l.activeStreams.dequeue() + str := l.activeStreams.dequeue() // Remove the first stream. if str == nil { return true, nil } - dataItem := str.itl.peek().(*dataFrame) - if len(dataItem.h) == 0 && len(dataItem.d) == 0 { + dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream. + // A data item is represented by a dataFrame, since it later translates into + // multiple HTTP2 data frames. + // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data. + // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the + // maximum possilbe HTTP2 frame size. + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame // Client sends out empty data frame with endStream = true if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { return false, err } - str.itl.dequeue() + str.itl.dequeue() // remove the empty data item from stream if str.itl.isEmpty() { str.state = empty } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. 
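The comments added to loopyWriter and processData above describe how each write is capped by the HTTP/2 maximum frame length and by stream- and connection-level flow control. The standalone sketch below is not part of the patch; all names are local to it, and it only restates that capping rule so the arithmetic is easy to follow.

package main

import "fmt"

const http2MaxFrameLen = 16384 // 16KB, the per-frame cap used by the transport

// writeCap returns how many bytes of a stream's pending data would be written
// in one processData pass, given the stream and connection flow-control state.
func writeCap(pending, oiws, bytesOutstanding, sendQuota int) int {
	size := http2MaxFrameLen
	if pending < size {
		size = pending
	}
	strQuota := oiws - bytesOutstanding
	if strQuota <= 0 {
		return 0 // the real loopy parks the stream until a window update arrives
	}
	if strQuota < size {
		size = strQuota // stream-level flow control
	}
	if sendQuota < size {
		size = sendQuota // connection-level flow control
	}
	return size
}

func main() {
	// 64KB pending on the stream, default 64KB stream window with 60KB already
	// outstanding, 20KB of connection quota left: only the remaining 4095 bytes
	// of stream window go out in this pass.
	fmt.Println(writeCap(64*1024, 65535, 60*1024, 20*1024))
}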
@@ -739,21 +796,20 @@ func (l *loopyWriter) processData() (bool, error) { if len(buf) < size { size = len(buf) } - if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control. str.state = waitingOnStreamQuota return false, nil } else if strQuota < size { size = strQuota } - if l.sendQuota < uint32(size) { + if l.sendQuota < uint32(size) { // connection-level flow control. size = int(l.sendQuota) } // Now that outgoing flow controls are checked we can replenish str's write quota str.wq.replenish(size) var endStream bool - // This last data message on this stream and all - // of it can be written in this go. + // If this is the last data message on this stream and all of it can be written in this iteration. if dataItem.endStream && size == len(buf) { // buf contains either data or it contains header but data is empty. if idx == 1 || len(dataItem.d) == 0 { diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 0000000..9fa306b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,49 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) diff --git a/vendor/google.golang.org/grpc/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go similarity index 86% rename from vendor/google.golang.org/grpc/transport/flowcontrol.go rename to vendor/google.golang.org/grpc/internal/transport/flowcontrol.go index bbf98b6..5ea997a 100644 --- a/vendor/google.golang.org/grpc/transport/flowcontrol.go +++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go @@ -23,30 +23,6 @@ import ( "math" "sync" "sync/atomic" - "time" -) - -const ( - // The default value of flow control window size in HTTP2 spec. 
- defaultWindowSize = 65535 - // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - infinity = time.Duration(math.MaxInt64) - defaultClientKeepaliveTime = infinity - defaultClientKeepaliveTimeout = 20 * time.Second - defaultMaxStreamsClient = 100 - defaultMaxConnectionIdle = infinity - defaultMaxConnectionAge = infinity - defaultMaxConnectionAgeGrace = infinity - defaultServerKeepaliveTime = 2 * time.Hour - defaultServerKeepaliveTimeout = 20 * time.Second - defaultKeepalivePolicyMinTime = 5 * time.Minute - // max window limit set by HTTP2 Specs. - maxWindowSize = math.MaxInt32 - // defaultWriteQuota is the default value for number of data - // bytes that each stream can schedule before some of it being - // flushed out. - defaultWriteQuota = 64 * 1024 ) // writeQuota is a soft limit on the amount of data a stream can diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/internal/transport/go16.go similarity index 77% rename from vendor/google.golang.org/grpc/transport/go16.go rename to vendor/google.golang.org/grpc/internal/transport/go16.go index 5babcf9..e0d0011 100644 --- a/vendor/google.golang.org/grpc/transport/go16.go +++ b/vendor/google.golang.org/grpc/internal/transport/go16.go @@ -25,6 +25,7 @@ import ( "net/http" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "golang.org/x/net/context" ) @@ -34,15 +35,15 @@ func dialContext(ctx context.Context, network, address string) (net.Conn, error) return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) } -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { +// ContextErr converts the error from context package into a status error. +func ContextErr(err error) error { switch err { case context.DeadlineExceeded: - return streamErrorf(codes.DeadlineExceeded, "%v", err) + return status.Error(codes.DeadlineExceeded, err.Error()) case context.Canceled: - return streamErrorf(codes.Canceled, "%v", err) + return status.Error(codes.Canceled, err.Error()) } - return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) } // contextFromRequest returns a background context. diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/internal/transport/go17.go similarity index 78% rename from vendor/google.golang.org/grpc/transport/go17.go rename to vendor/google.golang.org/grpc/internal/transport/go17.go index b7fa6bd..4d515b0 100644 --- a/vendor/google.golang.org/grpc/transport/go17.go +++ b/vendor/google.golang.org/grpc/internal/transport/go17.go @@ -26,6 +26,7 @@ import ( "net/http" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" netctx "golang.org/x/net/context" ) @@ -35,15 +36,15 @@ func dialContext(ctx context.Context, network, address string) (net.Conn, error) return (&net.Dialer{}).DialContext(ctx, network, address) } -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { +// ContextErr converts the error from context package into a status error. 
+func ContextErr(err error) error { switch err { case context.DeadlineExceeded, netctx.DeadlineExceeded: - return streamErrorf(codes.DeadlineExceeded, "%v", err) + return status.Error(codes.DeadlineExceeded, err.Error()) case context.Canceled, netctx.Canceled: - return streamErrorf(codes.Canceled, "%v", err) + return status.Error(codes.Canceled, err.Error()) } - return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) + return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err) } // contextFromRequest returns a context from the HTTP Request. diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go similarity index 96% rename from vendor/google.golang.org/grpc/transport/handler_server.go rename to vendor/google.golang.org/grpc/internal/transport/handler_server.go index f71b748..c6fb4b9 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -80,7 +80,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err) + return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) } st.timeoutSet = true st.timeout = to @@ -98,7 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err) + return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) } metakv = append(metakv, k, v) } @@ -237,9 +237,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro if ht.stats != nil { ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) } - ht.Close() close(ht.writes) } + ht.Close() return err } @@ -274,9 +274,7 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts ht.writeCommonHeaders(s) ht.rw.Write(hdr) ht.rw.Write(data) - if !opts.Delay { - ht.rw.(http.Flusher).Flush() - } + ht.rw.(http.Flusher).Flush() }) } @@ -328,11 +326,11 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace go func() { select { case <-requestOver: - return case <-ht.closedCh: case <-clientGone: } cancel() + ht.Close() }() req := ht.req @@ -434,18 +432,18 @@ func (ht *serverHandlerTransport) Drain() { // * io.EOF // * io.ErrUnexpectedEOF // * of type transport.ConnectionError -// * of type transport.StreamError +// * an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err } if se, ok := err.(http2.StreamError); ok { if code, ok := http2ErrConvTab[se.Code]; ok { - return StreamError{ - Code: code, - Desc: se.Error(), - } + return status.Error(code, se.Error()) } } + if strings.Contains(err.Error(), "body closed by handler") { + return status.Error(codes.Canceled, err.Error()) + } return connectionErrorf(true, err, err.Error()) } diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go similarity index 87% rename from vendor/google.golang.org/grpc/transport/http2_client.go rename to vendor/google.golang.org/grpc/internal/transport/http2_client.go index eaf007e..904e790 
100644 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -22,6 +22,7 @@ import ( "io" "math" "net" + "strconv" "strings" "sync" "sync/atomic" @@ -84,6 +85,9 @@ type http2Client struct { initialWindowSize int32 + // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE + maxSendHeaderListSize *uint32 + bdpEst *bdpEstimator // onSuccess is a callback that client transport calls upon // receiving server preface to signal that a succefull HTTP2 @@ -107,19 +111,7 @@ type http2Client struct { // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number - czmu sync.RWMutex - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // The number of streams that have ended successfully by receiving EoS bit set - // frame from server. - streamsSucceeded int64 - streamsFailed int64 - lastStreamCreated time.Time - msgSent int64 - msgRecv int64 - lastMsgSent time.Time - lastMsgRecv time.Time + czData *channelzData } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { @@ -148,7 +140,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -196,13 +188,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne icwz = opts.InitialConnWindowSize dynamicWindow = false } - writeBufSize := defaultWriteBufSize - if opts.WriteBufferSize > 0 { - writeBufSize = opts.WriteBufferSize - } - readBufSize := defaultReadBufSize - if opts.ReadBufferSize > 0 { - readBufSize = opts.ReadBufferSize + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize } t := &http2Client{ ctx: ctx, @@ -218,7 +208,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne writerDone: make(chan struct{}), goAway: make(chan struct{}), awakenKeepalive: make(chan struct{}, 1), - framer: newFramer(conn, writeBufSize, readBufSize), + framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -232,6 +222,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), } t.controlBuf = newControlBuffer(t.ctxDone) if opts.InitialWindowSize >= defaultWindowSize { @@ -278,14 +269,21 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne t.Close() return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) } + var ss []http2.Setting + if t.initialWindowSize != defaultWindowSize { - err = 
t.framer.fr.WriteSettings(http2.Setting{ + ss = append(ss, http2.Setting{ ID: http2.SettingInitialWindowSize, Val: uint32(t.initialWindowSize), }) - } else { - err = t.framer.fr.WriteSettings() } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) if err != nil { t.Close() return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) @@ -379,6 +377,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) @@ -464,7 +465,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s return nil, err } - return nil, streamErrorf(codes.Unauthenticated, "transport: %v", err) + return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2. @@ -482,11 +483,11 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // options, then both sets of credentials will be applied. if callCreds := callHdr.Creds; callCreds != nil { if !t.isSecure && callCreds.RequireTransportSecurity() { - return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, streamErrorf(codes.Internal, "transport: %v", err) + return nil, status.Errorf(codes.Internal, "transport: %v", err) } for k, v := range data { // Capital header names are illegal in HTTP/2 @@ -538,10 +539,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } t.activeStreams[id] = s if channelz.IsOn() { - t.czmu.Lock() - t.streamsStarted++ - t.lastStreamCreated = time.Now() - t.czmu.Unlock() + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) } var sendPing bool // If the number of active streams change from 0 to 1, then check if keepalive @@ -590,14 +589,40 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } return true } + var hdrListSizeErr error + checkForHeaderListSize := func(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } for { - success, err := t.controlBuf.executeAndPut(checkForStreamQuota, hdr) + success, err := t.controlBuf.executeAndPut(func(it 
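The client preface above now accumulates all SETTINGS (initial window size, max header list size) into one slice and writes a single frame. A small sketch of that pattern against golang.org/x/net/http2; the buffer and values are placeholders.

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, &buf)

	// Accumulate settings conditionally, then emit one SETTINGS frame.
	var ss []http2.Setting
	ss = append(ss, http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 20})
	ss = append(ss, http2.Setting{ID: http2.SettingMaxHeaderListSize, Val: 16 << 10})
	if err := fr.WriteSettings(ss...); err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes of SETTINGS\n", buf.Len())
}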
interface{}) bool { + if !checkForStreamQuota(it) { + return false + } + if !checkForHeaderListSize(it) { + return false + } + return true + }, hdr) if err != nil { return nil, err } if success { break } + if hdrListSizeErr != nil { + return nil, hdrListSizeErr + } firstTry = false select { case <-ch: @@ -633,7 +658,7 @@ func (t *http2Client) CloseStream(s *Stream, err error) { rst = true rstCode = http2.ErrCodeCancel } - t.closeStream(s, err, rst, rstCode, nil, nil, false) + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) } func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { @@ -657,6 +682,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. close(s.done) // If headerChan isn't closed, then close it. if atomic.SwapUint32(&s.headerDone, 1) == 0 { + s.noHeaders = true close(s.headerChan) } cleanup := &cleanupStream{ @@ -668,13 +694,11 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. } t.mu.Unlock() if channelz.IsOn() { - t.czmu.Lock() if eosReceived { - t.streamsSucceeded++ + atomic.AddInt64(&t.czData.streamsSucceeded, 1) } else { - t.streamsFailed++ + atomic.AddInt64(&t.czData.streamsFailed, 1) } - t.czmu.Unlock() } }, rst: rst, @@ -715,7 +739,7 @@ func (t *http2Client) Close() error { } // Notify all active streams. for _, s := range streams { - t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, nil, nil, false) + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false) } if t.statsHandler != nil { connEnd := &stats.ConnEnd{ @@ -909,6 +933,13 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) statusCode = codes.Unknown } + if statusCode == codes.Canceled { + // Our deadline was already exceeded, and that was likely the cause of + // this cancelation. Alter the status code accordingly. + if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) { + statusCode = codes.DeadlineExceeded + } + } t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) } @@ -918,13 +949,20 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } var maxStreams *uint32 var ss []http2.Setting + var updateFuncs []func() f.ForeachSetting(func(s http2.Setting) error { - if s.ID == http2.SettingMaxConcurrentStreams { + switch s.ID { + case http2.SettingMaxConcurrentStreams: maxStreams = new(uint32) *maxStreams = s.Val - return nil + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) } - ss = append(ss, s) return nil }) if isFirst && maxStreams == nil { @@ -934,21 +972,24 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { sf := &incomingSettings{ ss: ss, } - if maxStreams == nil { - t.controlBuf.put(sf) - return + if maxStreams != nil { + updateStreamQuota := func() { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. 
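The checkForHeaderListSize helper above sums hpack field sizes against the peer-advertised SETTINGS_MAX_HEADER_LIST_SIZE before a headers frame is queued. A standalone sketch of the same accounting; the header values and 64-byte budget are made up.

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// exceedsLimit reports whether the summed hpack.HeaderField sizes
// would overflow a peer-advertised header list size limit.
func exceedsLimit(hf []hpack.HeaderField, limit uint32) bool {
	var sz int64
	for _, f := range hf {
		if sz += int64(f.Size()); sz > int64(limit) {
			return true
		}
	}
	return false
}

func main() {
	hdrs := []hpack.HeaderField{
		{Name: ":method", Value: "POST"},
		{Name: "content-type", Value: "application/grpc"},
	}
	fmt.Println(exceedsLimit(hdrs, 64)) // true: these two fields exceed a 64-byte budget
}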
+ t.streamsQuotaAvailable = make(chan struct{}, 1) + } + } + updateFuncs = append(updateFuncs, updateStreamQuota) } - updateStreamQuota := func(interface{}) bool { - delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) - t.maxConcurrentStreams = *maxStreams - t.streamQuota += delta - if delta > 0 && t.waitingStreams > 0 { - close(t.streamsQuotaAvailable) // wake all of them up. - t.streamsQuotaAvailable = make(chan struct{}, 1) + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() } return true - } - t.controlBuf.executeAndPut(updateStreamQuota, sf) + }, sf) } func (t *http2Client) handlePing(f *http2.PingFrame) { @@ -1059,8 +1100,8 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { } atomic.StoreUint32(&s.bytesReceived, 1) var state decodeState - if err := state.decodeResponseHeader(frame); err != nil { - t.closeStream(s, err, true, http2.ErrCodeProtocol, nil, nil, false) + if err := state.decodeHeader(frame); err != nil { + t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false) // Something wrong. Stops reading even when there is remaining. return } @@ -1096,6 +1137,8 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(state.mdata) > 0 { s.header = state.mdata } + } else { + s.noHeaders = true } close(s.headerChan) } @@ -1146,7 +1189,9 @@ func (t *http2Client) reader() { t.mu.Unlock() if s != nil { // use error detail to provide better err message - t.closeStream(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()), true, http2.ErrCodeProtocol, nil, nil, false) + code := http2ErrConvTab[se.Code] + msg := t.framer.fr.ErrorDetail().Error() + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) } continue } else { @@ -1203,9 +1248,7 @@ func (t *http2Client) keepalive() { } else { t.mu.Unlock() if channelz.IsOn() { - t.czmu.Lock() - t.kpCount++ - t.czmu.Unlock() + atomic.AddInt64(&t.czData.kpCount, 1) } // Send ping. 
t.controlBuf.put(p) @@ -1245,41 +1288,37 @@ func (t *http2Client) GoAway() <-chan struct{} { } func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { - t.czmu.RLock() s := channelz.SocketInternalMetric{ - StreamsStarted: t.streamsStarted, - StreamsSucceeded: t.streamsSucceeded, - StreamsFailed: t.streamsFailed, - MessagesSent: t.msgSent, - MessagesReceived: t.msgRecv, - KeepAlivesSent: t.kpCount, - LastLocalStreamCreatedTimestamp: t.lastStreamCreated, - LastMessageSentTimestamp: t.lastMsgSent, - LastMessageReceivedTimestamp: t.lastMsgRecv, + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), - //socket options - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // Security + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, // RemoteName : } - t.czmu.RUnlock() + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } s.RemoteFlowControlWindow = t.getOutFlowWindow() return &s } func (t *http2Client) IncrMsgSent() { - t.czmu.Lock() - t.msgSent++ - t.lastMsgSent = time.Now() - t.czmu.Unlock() + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) } func (t *http2Client) IncrMsgRecv() { - t.czmu.Lock() - t.msgRecv++ - t.lastMsgRecv = time.Now() - t.czmu.Unlock() + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go similarity index 87% rename from vendor/google.golang.org/grpc/transport/http2_server.go rename to vendor/google.golang.org/grpc/internal/transport/http2_server.go index 19acedb..efb7f53 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -48,9 +48,14 @@ import ( "google.golang.org/grpc/tap" ) -// ErrIllegalHeaderWrite indicates that setting header is illegal because of -// the stream's state. -var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer") +) // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { @@ -89,9 +94,10 @@ type http2Server struct { // Flag to signify that number of ping strikes should be reset to 0. 
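The channelz bookkeeping above drops the czmu mutex in favor of atomic operations on int64 fields grouped in czData, with timestamps stored as UnixNano. A minimal sketch of that pattern with illustrative field names.

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// counters groups int64 fields so atomic operations stay correctly aligned
// on 32-bit platforms, the reasoning this diff gives for channelzData.
type counters struct {
	streamsStarted        int64
	lastStreamCreatedTime int64 // UnixNano, kept as int64 so it can be updated atomically
}

func main() {
	c := new(counters)

	// Update path: cheap atomics instead of a mutex-guarded critical section.
	atomic.AddInt64(&c.streamsStarted, 1)
	atomic.StoreInt64(&c.lastStreamCreatedTime, time.Now().UnixNano())

	// Read path: a metrics snapshot converts the stored nanoseconds back to time.Time.
	fmt.Println(atomic.LoadInt64(&c.streamsStarted),
		time.Unix(0, atomic.LoadInt64(&c.lastMsgTime(c))))
}

// lastMsgTime is a tiny helper only so the read path above stays on one line.
func (c *counters) lastMsgTime(_ *counters) int64 {
	return atomic.LoadInt64(&c.lastStreamCreatedTime)
}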
// This is set whenever data or header frames are sent. // 1 means yes. - resetPingStrikes uint32 // Accessed atomically. - initialWindowSize int32 - bdpEst *bdpEstimator + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 mu sync.Mutex // guard the following @@ -112,33 +118,19 @@ type http2Server struct { // Fields below are for channelz metric collection. channelzID int64 // channelz unique identification number - czmu sync.RWMutex - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // The number of streams that have ended successfully by sending frame with - // EoS bit set. - streamsSucceeded int64 - streamsFailed int64 - lastStreamCreated time.Time - msgSent int64 - msgRecv int64 - lastMsgSent time.Time - lastMsgRecv time.Time + czData *channelzData } // newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is // returned if something goes wrong. func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { - writeBufSize := defaultWriteBufSize - if config.WriteBufferSize > 0 { - writeBufSize = config.WriteBufferSize - } - readBufSize := defaultReadBufSize - if config.ReadBufferSize > 0 { - readBufSize = config.ReadBufferSize + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize) + framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) // Send initial settings as connection preface to client. var isettings []http2.Setting // TODO(zhaoq): Have a better way to signal "no limit" because 0 is @@ -168,6 +160,12 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err ID: http2.SettingInitialWindowSize, Val: uint32(iwz)}) } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } if err := framer.fr.WriteSettings(isettings...); err != nil { return nil, connectionErrorf(false, err, "transport: %v", err) } @@ -221,6 +219,7 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err idle: time.Now(), kep: kep, initialWindowSize: iwz, + czData: new(channelzData), } t.controlBuf = newControlBuffer(t.ctxDone) if dynamicWindow { @@ -285,21 +284,19 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err } // operateHeader takes action on the decoded headers. 
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { streamID := frame.Header().StreamID - var state decodeState - for _, hf := range frame.Fields { - if err := state.processHeaderField(hf); err != nil { - if se, ok := err.(StreamError); ok { - t.controlBuf.put(&cleanupStream{ - streamID: streamID, - rst: true, - rstCode: statusCodeConvTab[se.Code], - onWrite: func() {}, - }) - } - return + state := decodeState{serverSide: true} + if err := state.decodeHeader(frame); err != nil { + if se, ok := status.FromError(err); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code()], + onWrite: func() {}, + }) } + return false } buf := newRecvBuffer() @@ -353,13 +350,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) - return + return false } } t.mu.Lock() if t.state != reachable { t.mu.Unlock() - return + return false } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -369,7 +366,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeRefusedStream, onWrite: func() {}, }) - return + return false } if streamID%2 != 1 || streamID <= t.maxStreamID { t.mu.Unlock() @@ -384,10 +381,8 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } t.mu.Unlock() if channelz.IsOn() { - t.czmu.Lock() - t.streamsStarted++ - t.lastStreamCreated = time.Now() - t.czmu.Unlock() + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) @@ -422,7 +417,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return + return false } // HandleStreams receives incoming streams using the given handler. This is @@ -619,11 +614,25 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { return } var ss []http2.Setting + var updateFuncs []func() f.ForeachSetting(func(s http2.Setting) error { - ss = append(ss, s) + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } return nil }) - t.controlBuf.put(&incomingSettings{ + t.controlBuf.executeAndPut(func(interface{}) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ ss: ss, }) } @@ -703,6 +712,21 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } +func (t *http2Server) checkForHeaderListSize(it interface{}) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + return false + } + } + return true +} + // WriteHeader sends the header metedata md back to the client. 
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { if s.updateHeaderSent() || s.getState() == streamDone { @@ -716,12 +740,15 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { s.header = md } } - t.writeHeaderLocked(s) + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } s.hdrMu.Unlock() return nil } -func (t *http2Server) writeHeaderLocked(s *Stream) { +func (t *http2Server) writeHeaderLocked(s *Stream) error { // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields // first and create a slice of that exact size. headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. @@ -731,7 +758,7 @@ func (t *http2Server) writeHeaderLocked(s *Stream) { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) } headerFields = appendHeaderFieldsFromMD(headerFields, s.header) - t.controlBuf.put(&headerFrame{ + success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{ streamID: s.id, hf: headerFields, endStream: false, @@ -739,12 +766,20 @@ func (t *http2Server) writeHeaderLocked(s *Stream) { atomic.StoreUint32(&t.resetPingStrikes, 1) }, }) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, nil, false) + return ErrHeaderListSizeLimitViolation + } if t.stats != nil { // Note: WireLength is not set in outHeader. // TODO(mmukhi): Revisit this later, if needed. outHeader := &stats.OutHeader{} t.stats.HandleRPC(s.Context(), outHeader) } + return nil } // WriteStatus sends stream status to the client and terminates the stream. @@ -761,7 +796,10 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. if !s.updateHeaderSent() { // No headers have been sent. if len(s.header) > 0 { // Send a separate header frame. - t.writeHeaderLocked(s) + if err := t.writeHeaderLocked(s); err != nil { + s.hdrMu.Unlock() + return err + } } else { // Send a trailer only response. headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) @@ -791,6 +829,14 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { }, } s.hdrMu.Unlock() + success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader) + if !success { + if err != nil { + return err + } + t.closeStream(s, true, http2.ErrCodeInternal, nil, false) + return ErrHeaderListSizeLimitViolation + } t.closeStream(s, false, 0, trailingHeader, true) if t.stats != nil { t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) @@ -804,7 +850,7 @@ func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) e if !s.isHeaderSent() { // Headers haven't been written yet. if err := t.WriteHeader(s, nil); err != nil { // TODO(mmukhi, dfawley): Make sure this is the right code to return. - return streamErrorf(codes.Internal, "transport: %v", err) + return status.Errorf(codes.Internal, "transport: %v", err) } } else { // Writing headers checks for this condition. 
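With the change above, writing headers can now fail (for example with ErrHeaderListSizeLimitViolation when the client's advertised limit would be exceeded), so application handlers should not discard the error from SendHeader. A hedged sketch of a helper; the package name is hypothetical.

package demo

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// sendHeader propagates header-write failures so gRPC can terminate the stream.
func sendHeader(stream grpc.ServerStream, md metadata.MD) error {
	if err := stream.SendHeader(md); err != nil {
		// e.g. the header list exceeds the limit the peer advertised via SETTINGS.
		return err
	}
	return nil
}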
@@ -918,9 +964,7 @@ func (t *http2Server) keepalive() { } pingSent = true if channelz.IsOn() { - t.czmu.Lock() - t.kpCount++ - t.czmu.Unlock() + atomic.AddInt64(&t.czData.kpCount, 1) } t.controlBuf.put(p) keepalive.Reset(t.kp.Timeout) @@ -985,13 +1029,11 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hd } t.mu.Unlock() if channelz.IsOn() { - t.czmu.Lock() if eosReceived { - t.streamsSucceeded++ + atomic.AddInt64(&t.czData.streamsSucceeded, 1) } else { - t.streamsFailed++ + atomic.AddInt64(&t.czData.streamsFailed, 1) } - t.czmu.Unlock() } }, } @@ -1079,41 +1121,37 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { } func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { - t.czmu.RLock() s := channelz.SocketInternalMetric{ - StreamsStarted: t.streamsStarted, - StreamsSucceeded: t.streamsSucceeded, - StreamsFailed: t.streamsFailed, - MessagesSent: t.msgSent, - MessagesReceived: t.msgRecv, - KeepAlivesSent: t.kpCount, - LastRemoteStreamCreatedTimestamp: t.lastStreamCreated, - LastMessageSentTimestamp: t.lastMsgSent, - LastMessageReceivedTimestamp: t.lastMsgRecv, + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), - //socket options - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // Security + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, // RemoteName : } - t.czmu.RUnlock() + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } s.RemoteFlowControlWindow = t.getOutFlowWindow() return &s } func (t *http2Server) IncrMsgSent() { - t.czmu.Lock() - t.msgSent++ - t.lastMsgSent = time.Now() - t.czmu.Unlock() + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) } func (t *http2Server) IncrMsgRecv() { - t.czmu.Lock() - t.msgRecv++ - t.lastMsgRecv = time.Now() - t.czmu.Unlock() + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) } func (t *http2Server) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go similarity index 89% rename from vendor/google.golang.org/grpc/transport/http_util.go rename to vendor/google.golang.org/grpc/internal/transport/http_util.go index 7d15c7d..21da6e8 100644 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -23,6 +23,7 @@ import ( "bytes" "encoding/base64" "fmt" + "io" "net" "net/http" "strconv" @@ -43,9 +44,6 @@ const ( http2MaxFrameLen = 16384 // 16KB frame // http://http2.github.io/http2-spec/#SettingValues http2InitHeaderTableSize = 4096 - // http2IOBufSize specifies the buffer size for sending frames. 
- defaultWriteBufSize = 32 * 1024 - defaultReadBufSize = 32 * 1024 // baseContentType is the base content-type for gRPC. This is a valid // content-type on it's own, but can also include a content-subtype such as // "proto" as a suffix after "+" or ";". See @@ -121,6 +119,8 @@ type decodeState struct { statsTags []byte statsTrace []byte contentSubtype string + // whether decoding on server side or not + serverSide bool } // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -139,6 +139,9 @@ func isReservedHeader(hdr string) bool { "grpc-status", "grpc-timeout", "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. "te": return true default: @@ -146,8 +149,8 @@ func isReservedHeader(hdr string) bool { } } -// isWhitelistedHeader checks whether hdr should be propagated -// into metadata visible to users. +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. func isWhitelistedHeader(hdr string) bool { switch hdr { case ":authority", "user-agent": @@ -234,13 +237,22 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { +func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error { + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + return status.Error(codes.Internal, "peer header list size exceeded limit") + } for _, hf := range frame.Fields { if err := d.processHeaderField(hf); err != nil { return err } } + if d.serverSide { + return nil + } + // If grpc status exists, no need to check further. if d.rawStatusCode != nil || d.statusGen != nil { return nil @@ -249,7 +261,7 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error // If grpc status doesn't exist and http status doesn't exist, // then it's a malformed header. if d.httpStatus == nil { - return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") } if *(d.httpStatus) != http.StatusOK { @@ -257,7 +269,7 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error if !ok { code = codes.Unknown } - return streamErrorf(code, http.StatusText(*(d.httpStatus))) + return status.Error(code, http.StatusText(*(d.httpStatus))) } // gRPC status doesn't exist and http status is OK. 
@@ -269,7 +281,6 @@ func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error code := int(codes.Unknown) d.rawStatusCode = &code return nil - } func (d *decodeState) addMetadata(k, v string) { @@ -284,7 +295,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { case "content-type": contentSubtype, validContentType := contentSubtype(f.Value) if !validContentType { - return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) + return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) } d.contentSubtype = contentSubtype // TODO: do we want to propagate the whole content-type in the metadata, @@ -297,7 +308,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { case "grpc-status": code, err := strconv.Atoi(f.Value) if err != nil { - return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) + return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err) } d.rawStatusCode = &code case "grpc-message": @@ -305,38 +316,38 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error { case "grpc-status-details-bin": v, err := decodeBinHeader(f.Value) if err != nil { - return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) } s := &spb.Status{} if err := proto.Unmarshal(v, s); err != nil { - return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) } d.statusGen = status.FromProto(s) case "grpc-timeout": d.timeoutSet = true var err error if d.timeout, err = decodeTimeout(f.Value); err != nil { - return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err) + return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err) } case ":path": d.method = f.Value case ":status": code, err := strconv.Atoi(f.Value) if err != nil { - return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) + return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err) } d.httpStatus = &code case "grpc-tags-bin": v, err := decodeBinHeader(f.Value) if err != nil { - return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) } d.statsTags = v d.addMetadata(f.Name, string(v)) case "grpc-trace-bin": v, err := decodeBinHeader(f.Value) if err != nil { - return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) } d.statsTrace = v d.addMetadata(f.Name, string(v)) @@ -545,6 +556,9 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { if w.err != nil { return 0, w.err } + if w.batchSize == 0 { // Buffer has been disabled. 
+ return w.conn.Write(b) + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] @@ -577,8 +591,14 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { - r := bufio.NewReaderSize(conn, readBufferSize) +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } w := newBufWriter(conn, writeBufferSize) f := &framer{ writer: w, @@ -587,6 +607,7 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { // Opt-in to Frame reuse API on framer to reduce garbage. // Frames aren't safe to read from after a subsequent call to ReadFrame. f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) return f } diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go similarity index 90% rename from vendor/google.golang.org/grpc/transport/log.go rename to vendor/google.golang.org/grpc/internal/transport/log.go index ac8e358..879df80 100644 --- a/vendor/google.golang.org/grpc/transport/log.go +++ b/vendor/google.golang.org/grpc/internal/transport/log.go @@ -42,9 +42,3 @@ func errorf(format string, args ...interface{}) { grpclog.Errorf(format, args...) } } - -func fatalf(format string, args ...interface{}) { - if grpclog.V(logLevel) { - grpclog.Fatalf(format, args...) - } -} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go similarity index 90% rename from vendor/google.golang.org/grpc/transport/transport.go rename to vendor/google.golang.org/grpc/internal/transport/transport.go index f51f878..fdf8ad6 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -19,7 +19,7 @@ // Package transport defines and implements message oriented communication // channel to complete various transactions (e.g., an RPC). It is meant for // grpc-internal usage and is not intended to be imported directly by users. -package transport // externally used as import "google.golang.org/grpc/transport" +package transport import ( "errors" @@ -176,7 +176,6 @@ type Stream struct { buf *recvBuffer trReader io.Reader fc *inFlow - recvQuota uint32 wq *writeQuota // Callback to state application's intentions to read data. This @@ -191,6 +190,8 @@ type Stream struct { header metadata.MD // the received header metadata. trailer metadata.MD // the key-value map of trailer metadata. + noHeaders bool // set if the client never received headers (set only after the stream is done). + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. headerSent uint32 @@ -259,7 +260,7 @@ func (s *Stream) SetSendCompress(str string) { s.sendCompress = str } -// Done returns a chanel which is closed when it receives the final status +// Done returns a channel which is closed when it receives the final status // from the server. func (s *Stream) Done() <-chan struct{} { return s.done @@ -282,6 +283,19 @@ func (s *Stream) Header() (metadata.MD, error) { return nil, err } +// TrailersOnly blocks until a header or trailers-only frame is received and +// then returns true if the stream was trailers-only. 
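The newFramer change above gives buffer sizes "zero means disabled" semantics: a zero read buffer skips bufio entirely so the framer reads straight from the connection. A small sketch of that decision; the strings.Reader stands in for a net.Conn.

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// reader mirrors the newFramer change: a zero buffer size bypasses bufio.
func reader(conn io.Reader, readBufferSize int) io.Reader {
	var r io.Reader = conn
	if readBufferSize > 0 {
		r = bufio.NewReaderSize(r, readBufferSize)
	}
	return r
}

func main() {
	fmt.Printf("%T\n", reader(strings.NewReader("x"), 0))     // *strings.Reader: unbuffered
	fmt.Printf("%T\n", reader(strings.NewReader("x"), 32768)) // *bufio.Reader
}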
If the stream ends +// before headers are received, returns true, nil. If a context error happens +// first, returns it as a status error. Client-side only. +func (s *Stream) TrailersOnly() (bool, error) { + err := s.waitOnHeader() + if err != nil { + return false, err + } + // if !headerDone, some other connection error occurred. + return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil +} + // Trailer returns the cached trailer metedata. Note that if it is not called // after the entire stream is done, it could return an empty MD. Client // side only. @@ -292,12 +306,6 @@ func (s *Stream) Trailer() metadata.MD { return c } -// ServerTransport returns the underlying ServerTransport for the stream. -// The client side stream always returns nil. -func (s *Stream) ServerTransport() ServerTransport { - return s.st -} - // ContentSubtype returns the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". This will always be lowercase. See @@ -319,7 +327,7 @@ func (s *Stream) Method() string { // Status returns the status received from the server. // Status can be read safely only after the stream has ended, -// that is, read or write has returned io.EOF. +// that is, after Done() is closed. func (s *Stream) Status() *status.Status { return s.status } @@ -344,8 +352,7 @@ func (s *Stream) SetHeader(md metadata.MD) error { // combined with any metadata set by previous calls to SetHeader and // then written to the transport stream. func (s *Stream) SendHeader(md metadata.MD) error { - t := s.ServerTransport() - return t.WriteHeader(s, md) + return s.st.WriteHeader(s, md) } // SetTrailer sets the trailer metadata which will be sent with the RPC status @@ -439,6 +446,7 @@ type ServerConfig struct { WriteBufferSize int ReadBufferSize int ChannelzParentID int64 + MaxHeaderListSize *uint32 } // NewServerTransport creates a ServerTransport with conn or non-nil error @@ -451,9 +459,6 @@ func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (S type ConnectOptions struct { // UserAgent is the application user agent. UserAgent string - // Authority is the :authority pseudo-header to use. This field has no effect if - // TransportCredentials is set. - Authority string // Dialer specifies how to dial a network address. Dialer func(context.Context, string) (net.Conn, error) // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. @@ -476,6 +481,8 @@ type ConnectOptions struct { ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID int64 + // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. + MaxHeaderListSize *uint32 } // TargetInfo contains the information of the target such as network address and metadata. @@ -497,11 +504,6 @@ type Options struct { // Last indicates whether this write is the last piece for // this stream. Last bool - - // Delay is a hint to the transport implementation for whether - // the data could be buffered for a batching write. The - // transport implementation may ignore the hint. - Delay bool } // CallHdr carries the information of a particular RPC. @@ -519,14 +521,6 @@ type CallHdr struct { // Creds specifies credentials.PerRPCCredentials for a call. Creds credentials.PerRPCCredentials - // Flush indicates whether a new stream command should be sent - // to the peer without waiting for the first data. 
This is - // only a hint. - // If it's true, the transport may modify the flush decision - // for performance purposes. - // If it's false, new stream will never be flushed. - Flush bool - // ContentSubtype specifies the content-subtype for a request. For example, a // content-subtype of "proto" will result in a content-type of // "application/grpc+proto". The value of ContentSubtype must be all @@ -534,6 +528,8 @@ type CallHdr struct { // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests // for more details. ContentSubtype string + + PreviousAttempts int // value of grpc-previous-rpc-attempts header to set } // ClientTransport is the common interface for all gRPC client-side transport @@ -622,14 +618,6 @@ type ServerTransport interface { IncrMsgRecv() } -// streamErrorf creates an StreamError with the specified error code and description. -func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { - return StreamError{ - Code: c, - Desc: fmt.Sprintf(format, a...), - } -} - // connectionErrorf creates an ConnectionError with the specified error description. func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { return ConnectionError{ @@ -672,7 +660,7 @@ var ( // errStreamDrain indicates that the stream is rejected because the // connection is draining. This could be caused by goaway or balancer // removing the address. - errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining") + errStreamDrain = status.Error(codes.Unavailable, "the connection is draining") // errStreamDone is returned from write at the client side to indiacte application // layer of an error. errStreamDone = errors.New("the stream is done") @@ -681,18 +669,6 @@ var ( statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") ) -// TODO: See if we can replace StreamError with status package errors. - -// StreamError is an error that only affects one stream within a connection. -type StreamError struct { - Code codes.Code - Desc string -} - -func (e StreamError) Error() string { - return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) -} - // GoAwayReason contains the reason for the GoAway frame received. type GoAwayReason uint8 @@ -706,3 +682,27 @@ const ( // "too_many_pings". GoAwayTooManyPings GoAwayReason = 2 ) + +// channelzData is used to store channelz related data for http2Client and http2Server. +// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // Client side: The number of streams that have ended successfully by receiving + // EoS bit set frame from server. + // Server side: The number of streams that have ended successfully by sending + // frame with EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type + // instead of time.Time since it's more costly to atomically update time.Time variable than int64 + // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. 
+ lastStreamCreatedTime int64 + msgSent int64 + msgRecv int64 + lastMsgSentTime int64 + lastMsgRecvTime int64 +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 019e658..76cc456 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -21,17 +21,14 @@ package grpc import ( "io" "sync" - "sync/atomic" "golang.org/x/net/context" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/resolver" + "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" - "google.golang.org/grpc/transport" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick @@ -45,16 +42,10 @@ type pickerWrapper struct { // The latest connection happened. connErrMu sync.Mutex connErr error - - stickinessMDKey atomic.Value - stickiness *stickyStore } func newPickerWrapper() *pickerWrapper { - bp := &pickerWrapper{ - blockingCh: make(chan struct{}), - stickiness: newStickyStore(), - } + bp := &pickerWrapper{blockingCh: make(chan struct{})} return bp } @@ -71,27 +62,6 @@ func (bp *pickerWrapper) connectionError() error { return err } -func (bp *pickerWrapper) updateStickinessMDKey(newKey string) { - // No need to check ok because mdKey == "" if ok == false. - if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey { - bp.stickinessMDKey.Store(newKey) - bp.stickiness.reset(newKey) - } -} - -func (bp *pickerWrapper) getStickinessMDKey() string { - // No need to check ok because mdKey == "" if ok == false. - mdKey, _ := bp.stickinessMDKey.Load().(string) - return mdKey -} - -func (bp *pickerWrapper) clearStickinessState() { - if oldKey := bp.getStickinessMDKey(); oldKey != "" { - // There's no need to reset store if mdKey was "". - bp.stickiness.reset(oldKey) - } -} - // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (bp *pickerWrapper) updatePicker(p balancer.Picker) { bp.mu.Lock() @@ -131,27 +101,6 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { - - mdKey := bp.getStickinessMDKey() - stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey) - - // Potential race here: if stickinessMDKey is updated after the above two - // lines, and this pick is a sticky pick, the following put could add an - // entry to sticky store with an outdated sticky key. - // - // The solution: keep the current md key in sticky store, and at the - // beginning of each get/put, check the mdkey against store.curMDKey. - // - Cons: one more string comparing for each get/put. - // - Pros: the string matching happens inside get/put, so the overhead for - // non-sticky RPCs will be minimal. - - if isSticky { - if t, ok := bp.stickiness.get(mdKey, stickyKey); ok { - // Done function returned is always nil. - return t, nil, nil - } - } - var ( p balancer.Picker ch chan struct{} @@ -207,9 +156,6 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer. 
continue } if t, ok := acw.getAddrConn().getReadyTransport(); ok { - if isSticky { - bp.stickiness.put(mdKey, stickyKey, acw) - } if channelz.IsOn() { return t, doneChannelzWrapper(acw, done), nil } @@ -232,105 +178,3 @@ func (bp *pickerWrapper) close() { bp.done = true close(bp.blockingCh) } - -const stickinessKeyCountLimit = 1000 - -type stickyStoreEntry struct { - acw *acBalancerWrapper - addr resolver.Address -} - -type stickyStore struct { - mu sync.Mutex - // curMDKey is check before every get/put to avoid races. The operation will - // abort immediately when the given mdKey is different from the curMDKey. - curMDKey string - store *linkedMap -} - -func newStickyStore() *stickyStore { - return &stickyStore{ - store: newLinkedMap(), - } -} - -// reset clears the map in stickyStore, and set the currentMDKey to newMDKey. -func (ss *stickyStore) reset(newMDKey string) { - ss.mu.Lock() - ss.curMDKey = newMDKey - ss.store.clear() - ss.mu.Unlock() -} - -// stickyKey is the key to look up in store. mdKey will be checked against -// curMDKey to avoid races. -func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) { - ss.mu.Lock() - defer ss.mu.Unlock() - if mdKey != ss.curMDKey { - return - } - // TODO(stickiness): limit the total number of entries. - ss.store.put(stickyKey, &stickyStoreEntry{ - acw: acw, - addr: acw.getAddrConn().getCurAddr(), - }) - if ss.store.len() > stickinessKeyCountLimit { - ss.store.removeOldest() - } -} - -// stickyKey is the key to look up in store. mdKey will be checked against -// curMDKey to avoid races. -func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) { - ss.mu.Lock() - defer ss.mu.Unlock() - if mdKey != ss.curMDKey { - return nil, false - } - entry, ok := ss.store.get(stickyKey) - if !ok { - return nil, false - } - ac := entry.acw.getAddrConn() - if ac.getCurAddr() != entry.addr { - ss.store.remove(stickyKey) - return nil, false - } - t, ok := ac.getReadyTransport() - if !ok { - ss.store.remove(stickyKey) - return nil, false - } - return t, true -} - -// Get one value from metadata in ctx with key stickinessMDKey. -// -// It returns "", false if stickinessMDKey is an empty string. -func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) { - if stickinessMDKey == "" { - return "", false - } - - md, added, ok := metadata.FromOutgoingContextRaw(ctx) - if !ok { - return "", false - } - - if vv, ok := md[stickinessMDKey]; ok { - if len(vv) > 0 { - return vv[0], true - } - } - - for _, ss := range added { - for i := 0; i < len(ss)-1; i += 2 { - if ss[i] == stickinessMDKey { - return ss[i+1], true - } - } - } - - return "", false -} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 048fde6..4ce8167 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -33,6 +33,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" ) @@ -51,23 +52,29 @@ const ( ) var ( - errMissingAddr = errors.New("missing address") + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. 
"::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") ) // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. func NewBuilder() resolver.Builder { - return &dnsBuilder{freq: defaultFreq} + return &dnsBuilder{minFreq: defaultFreq} } type dnsBuilder struct { - // frequency of polling the DNS server. - freq time.Duration + // minimum frequency of polling the DNS server. + minFreq time.Duration } // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { if target.Authority != "" { - return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server") + return nil, fmt.Errorf("default DNS resolver does not support custom DNS server") } host, port, err := parseTarget(target.Endpoint) if err != nil { @@ -92,7 +99,8 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts // DNS address (non-IP). ctx, cancel := context.WithCancel(context.Background()) d := &dnsResolver{ - freq: b.freq, + freq: b.minFreq, + backoff: backoff.Exponential{MaxDelay: b.minFreq}, host: host, port: port, ctx: ctx, @@ -148,12 +156,14 @@ func (i *ipResolver) watcher() { // dnsResolver watches for the name resolution update for a non-IP target. type dnsResolver struct { - freq time.Duration - host string - port string - ctx context.Context - cancel context.CancelFunc - cc resolver.ClientConn + freq time.Duration + backoff backoff.Exponential + retryCount int + host string + port string + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn // rn channel is used by ResolveNow() to force an immediate resolution of the target. rn chan struct{} t *time.Timer @@ -192,8 +202,15 @@ func (d *dnsResolver) watcher() { case <-d.rn: } result, sc := d.lookup() - // Next lookup should happen after an interval defined by d.freq. - d.t.Reset(d.freq) + // Next lookup should happen within an interval defined by d.freq. It may be + // more often due to exponential retry on empty address list. + if len(result) == 0 { + d.retryCount++ + d.t.Reset(d.backoff.Backoff(d.retryCount)) + } else { + d.retryCount = 0 + d.t.Reset(d.freq) + } d.cc.NewServiceConfig(sc) d.cc.NewAddress(result) } @@ -297,7 +314,6 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: "ipv4-host:80" returns host: "ipv4-host", port: "80" // target: "[ipv6-host]" returns host: "ipv6-host", port: "443" // target: ":80" returns host: "localhost", port: "80" -// target: ":" returns host: "localhost", port: "443" func parseTarget(target string) (host, port string, err error) { if target == "" { return "", "", errMissingAddr @@ -307,15 +323,15 @@ func parseTarget(target string) (host, port string, err error) { return target, defaultPort, nil } if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + return "", "", errEndsWithColon + } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. host = "localhost" } - if port == "" { - // If the port field is empty(target ends with colon), e.g. 
"[::1]:", defaultPort is used. - port = defaultPort - } return host, port, nil } if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 506afac..145cf47 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -49,8 +49,12 @@ func Get(scheme string) Builder { return nil } -// SetDefaultScheme sets the default scheme that will be used. -// The default default scheme is "passthrough". +// SetDefaultScheme sets the default scheme that will be used. The default +// default scheme is "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. func SetDefaultScheme(scheme string) { defaultScheme = scheme } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 033801f..fa05683 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -36,11 +36,11 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - "google.golang.org/grpc/transport" ) // Compressor defines the interface gRPC uses to compress a message. @@ -156,16 +156,19 @@ type callInfo struct { compressorType string failFast bool stream *clientStream - traceInfo traceInfo // in trace.go maxReceiveMessageSize *int maxSendMessageSize *int creds credentials.PerRPCCredentials contentSubtype string codec baseCodec + maxRetryRPCBufferSize int } func defaultCallInfo() *callInfo { - return &callInfo{failFast: true} + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } } // CallOption configures a Call before it starts or extracts information from @@ -415,6 +418,27 @@ func (o CustomCodecCallOption) before(c *callInfo) error { } func (o CustomCodecCallOption) after(c *callInfo) {} +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// This API is EXPERIMENTAL. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// This is an EXPERIMENTAL API. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {} + // The format of the payload: compressed or not? type payloadFormat uint8 @@ -444,7 +468,7 @@ type parser struct { // * io.EOF, when no messages remain // * io.ErrUnexpectedEOF // * of type transport.ConnectionError -// * of type transport.StreamError +// * an error from the status package // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. 
@@ -724,6 +748,19 @@ func parseDialTarget(target string) (net string, addr string) { return net, target } +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest // support package version is 5. diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 014c72b..5c7d5b6 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -30,12 +30,12 @@ import ( "runtime" "strings" "sync" + "sync/atomic" "time" "io/ioutil" "golang.org/x/net/context" - "golang.org/x/net/http2" "golang.org/x/net/trace" "google.golang.org/grpc/codes" @@ -43,14 +43,13 @@ import ( "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" - "google.golang.org/grpc/transport" ) const ( @@ -106,12 +105,8 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number - czmu sync.RWMutex - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - lastCallStartedTime time.Time + channelzID int64 // channelz unique identification number + czData *channelzData } type options struct { @@ -126,7 +121,6 @@ type options struct { maxConcurrentStreams uint32 maxReceiveMessageSize int maxSendMessageSize int - useHandlerImpl bool // use http.Handler-based server unknownStreamDesc *StreamDesc keepaliveParams keepalive.ServerParameters keepalivePolicy keepalive.EnforcementPolicy @@ -135,19 +129,25 @@ type options struct { writeBufferSize int readBufferSize int connectionTimeout time.Duration + maxHeaderListSize *uint32 } var defaultServerOptions = options{ maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, } // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption func(*options) -// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched -// before doing a write on the wire. +// WriteBufferSize determines how much data can be batched before doing a write on the wire. +// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. +// The default value for this buffer is 32KB. 
+// Zero will disable the write buffer such that each write will be on underlying connection. +// Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return func(o *options) { o.writeBufferSize = s @@ -156,6 +156,9 @@ func WriteBufferSize(s int) ServerOption { // ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most // for one read syscall. +// The default value for this buffer is 32KB. +// Zero will disable read buffer for a connection so data framer can access the underlying +// conn directly. func ReadBufferSize(s int) ServerOption { return func(o *options) { o.readBufferSize = s @@ -335,6 +338,14 @@ func ConnectionTimeout(d time.Duration) ServerOption { } } +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return func(o *options) { + o.maxHeaderListSize = &s + } +} + // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { @@ -343,12 +354,13 @@ func NewServer(opt ...ServerOption) *Server { o(&opts) } s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[io.Closer]bool), - m: make(map[string]*service), - quit: make(chan struct{}), - done: make(chan struct{}), + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[io.Closer]bool), + m: make(map[string]*service), + quit: make(chan struct{}), + done: make(chan struct{}), + czData: new(channelzData), } s.cv = sync.NewCond(&s.mu) if EnableTracing { @@ -357,7 +369,7 @@ func NewServer(opt ...ServerOption) *Server { } if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(s, "") + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") } return s } @@ -481,7 +493,8 @@ type listenSocket struct { func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { return &channelz.SocketInternalMetric{ - LocalAddr: l.Listener.Addr(), + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), } } @@ -617,27 +630,19 @@ func (s *Server) handleRawConn(rawConn net.Conn) { } s.mu.Unlock() - var serve func() - c := conn.(io.Closer) - if s.opts.useHandlerImpl { - serve = func() { s.serveUsingHandler(conn) } - } else { - // Finish handshaking (HTTP2) - st := s.newHTTP2Transport(conn, authInfo) - if st == nil { - return - } - c = st - serve = func() { s.serveStreams(st) } + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return } rawConn.SetDeadline(time.Time{}) - if !s.addConn(c) { + if !s.addConn(st) { return } go func() { - serve() - s.removeConn(c) + s.serveStreams(st) + s.removeConn(st) }() } @@ -656,6 +661,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, } st, err := transport.NewServerTransport("http2", c, config) if err != nil { @@ -691,27 +697,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { var _ http.Handler = (*Server)(nil) -// serveUsingHandler is called from handleRawConn when s is configured -// to handle requests via the http.Handler interface. It sets up a -// net/http.Server to handle the just-accepted conn. 
The http.Server -// is configured to route all incoming requests (all HTTP/2 streams) -// to ServeHTTP, which creates a new ServerTransport for each stream. -// serveUsingHandler blocks until conn closes. -// -// This codepath is only used when Server.TestingUseHandlerImpl has -// been configured. This lets the end2end tests exercise the ServeHTTP -// method as one of the environment types. -// -// conn is the *tls.Conn that's already been authenticated. -func (s *Server) serveUsingHandler(conn net.Conn) { - h2s := &http2.Server{ - MaxConcurrentStreams: s.opts.maxConcurrentStreams, - } - h2s.ServeConn(conn, &http2.ServeConnOpts{ - Handler: s, - }) -} - // ServeHTTP implements the Go standard library's http.Handler // interface by responding to the gRPC request r, by looking up // the requested gRPC method in the gRPC server s. @@ -794,36 +779,26 @@ func (s *Server) removeConn(c io.Closer) { } } -// ChannelzMetric returns ServerInternalMetric of current server. -// This is an EXPERIMENTAL API. -func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric { - s.czmu.RLock() - defer s.czmu.RUnlock() +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { return &channelz.ServerInternalMetric{ - CallsStarted: s.callsStarted, - CallsSucceeded: s.callsSucceeded, - CallsFailed: s.callsFailed, - LastCallStartedTimestamp: s.lastCallStartedTime, + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), } } func (s *Server) incrCallsStarted() { - s.czmu.Lock() - s.callsStarted++ - s.lastCallStartedTime = time.Now() - s.czmu.Unlock() + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) } func (s *Server) incrCallsSucceeded() { - s.czmu.Lock() - s.callsSucceeded++ - s.czmu.Unlock() + atomic.AddInt64(&s.czData.callsSucceeded, 1) } func (s *Server) incrCallsFailed() { - s.czmu.Lock() - s.callsFailed++ - s.czmu.Unlock() + atomic.AddInt64(&s.czData.callsFailed, 1) } func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { @@ -944,10 +919,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. switch st := err.(type) { case transport.ConnectionError: // Nothing to do here. - case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) } @@ -1028,10 +999,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if trInfo != nil { trInfo.tr.LazyLog(stringer("OK"), false) } - opts := &transport.Options{ - Last: true, - Delay: false, - } + opts := &transport.Options{Last: true} if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { @@ -1046,10 +1014,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. switch st := err.(type) { case transport.ConnectionError: // Nothing to do here. 
- case transport.StreamError: - if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) - } default: panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) } @@ -1099,11 +1063,11 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp } ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ - ctx: ctx, - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.getCodec(stream.ContentSubtype()), + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, trInfo: trInfo, @@ -1169,12 +1133,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - switch err := appErr.(type) { - case transport.StreamError: - appStatus = status.New(err.Code, err.Desc) - default: - appStatus = status.New(codes.Unknown, appErr.Error()) - } + appStatus = status.New(codes.Unknown, appErr.Error()) appErr = appStatus.Err() } if trInfo != nil { @@ -1410,12 +1369,6 @@ func (s *Server) GracefulStop() { s.mu.Unlock() } -func init() { - internal.TestingUseHandlerImpl = func(arg interface{}) { - arg.(*Server).opts.useHandlerImpl = true - } -} - // contentSubtype must be lowercase // cannot return nil func (s *Server) getCodec(contentSubtype string) baseCodec { @@ -1484,3 +1437,11 @@ func Method(ctx context.Context) (string, bool) { } return s.Method(), true } + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 015631d..e0d7352 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -25,6 +25,7 @@ import ( "strings" "time" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" ) @@ -56,6 +57,8 @@ type MethodConfig struct { // MaxRespSize is the maximum allowed payload size for an individual response in a // stream (server->client) in bytes. MaxRespSize *int + // RetryPolicy configures retry options for the method. + retryPolicy *retryPolicy } // ServiceConfig is provided by the service provider and contains parameters for how @@ -68,13 +71,84 @@ type ServiceConfig struct { // LB is the load balancer the service providers recommends. The balancer specified // via grpc.WithBalancer will override this. LB *string - // Methods contains a map for the methods in this service. - // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. - // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. - // Otherwise, the method has no MethodConfig to use. + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. 
Methods map[string]MethodConfig - stickinessMetadataKey *string + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy +} + +// retryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + maxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoffMS). In general, the nth attempt will occur at + // random(0, + // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). + // + // These fields are required and must be greater than zero. + initialBackoff time.Duration + maxBackoff time.Duration + backoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + retryableStatusCodes map[codes.Code]bool +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff string + MaxBackoff string + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 } func parseDuration(s *string) (*time.Duration, error) { @@ -144,13 +218,14 @@ type jsonMC struct { Timeout *string MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy } // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
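These retryPolicy and retryThrottling fields correspond to the JSON service config described in gRFC A6 and consumed by parseServiceConfig below. A hedged illustration of the expected shape, with placeholder service and method names:

```go
// exampleServiceConfig sketches the JSON that the new retry fields are parsed
// from; "foo.EchoService" and "Echo" are placeholders.
const exampleServiceConfig = `{
  "methodConfig": [{
    "name": [{ "service": "foo.EchoService", "method": "Echo" }],
    "waitForReady": true,
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }],
  "retryThrottling": { "maxTokens": 10, "tokenRatio": 0.1 }
}`
```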
type jsonSC struct { - LoadBalancingPolicy *string - StickinessMetadataKey *string - MethodConfig *[]jsonMC + LoadBalancingPolicy *string + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy } func parseServiceConfig(js string) (ServiceConfig, error) { @@ -161,10 +236,9 @@ func parseServiceConfig(js string) (ServiceConfig, error) { return ServiceConfig{}, err } sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, - Methods: make(map[string]MethodConfig), - - stickinessMetadataKey: rsc.StickinessMetadataKey, + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, } if rsc.MethodConfig == nil { return sc, nil @@ -184,6 +258,10 @@ func parseServiceConfig(js string) (ServiceConfig, error) { WaitForReady: m.WaitForReady, Timeout: d, } + if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } if m.MaxRequestMessageBytes != nil { if *m.MaxRequestMessageBytes > int64(maxInt) { mc.MaxReqSize = newInt(maxInt) @@ -205,9 +283,56 @@ func parseServiceConfig(js string) (ServiceConfig, error) { } } + if sc.retryThrottling != nil { + if sc.retryThrottling.MaxTokens <= 0 || + sc.retryThrottling.MaxTokens >= 1000 || + sc.retryThrottling.TokenRatio <= 0 { + // Illegal throttling config; disable throttling. + sc.retryThrottling = nil + } + } return sc, nil } +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) { + if jrp == nil { + return nil, nil + } + ib, err := parseDuration(&jrp.InitialBackoff) + if err != nil { + return nil, err + } + mb, err := parseDuration(&jrp.MaxBackoff) + if err != nil { + return nil, err + } + + if jrp.MaxAttempts <= 1 || + *ib <= 0 || + *mb <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, nil + } + + rp := &retryPolicy{ + maxAttempts: jrp.MaxAttempts, + initialBackoff: *ib, + maxBackoff: *mb, + backoffMultiplier: jrp.BackoffMultiplier, + retryableStatusCodes: make(map[codes.Code]bool), + } + if rp.maxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.maxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.retryableStatusCodes[code] = true + } + return rp, nil +} + func min(a, b *int) *int { if *a < *b { return a diff --git a/vendor/google.golang.org/grpc/stickiness_linkedmap.go b/vendor/google.golang.org/grpc/stickiness_linkedmap.go deleted file mode 100644 index 1c726af..0000000 --- a/vendor/google.golang.org/grpc/stickiness_linkedmap.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package grpc - -import ( - "container/list" -) - -type linkedMapKVPair struct { - key string - value *stickyStoreEntry -} - -// linkedMap is an implementation of a map that supports removing the oldest -// entry. -// -// linkedMap is NOT thread safe. -// -// It's for use of stickiness only! -type linkedMap struct { - m map[string]*list.Element - l *list.List // Head of the list is the oldest element. -} - -// newLinkedMap returns a new LinkedMap. -func newLinkedMap() *linkedMap { - return &linkedMap{ - m: make(map[string]*list.Element), - l: list.New(), - } -} - -// put adds entry (key, value) to the map. Existing key will be overridden. -func (m *linkedMap) put(key string, value *stickyStoreEntry) { - if oldE, ok := m.m[key]; ok { - // Remove existing entry. - m.l.Remove(oldE) - } - e := m.l.PushBack(&linkedMapKVPair{key: key, value: value}) - m.m[key] = e -} - -// get returns the value of the given key. -func (m *linkedMap) get(key string) (*stickyStoreEntry, bool) { - e, ok := m.m[key] - if !ok { - return nil, false - } - m.l.MoveToBack(e) - return e.Value.(*linkedMapKVPair).value, true -} - -// remove removes key from the map, and returns the value. The map is not -// modified if key is not in the map. -func (m *linkedMap) remove(key string) (*stickyStoreEntry, bool) { - e, ok := m.m[key] - if !ok { - return nil, false - } - delete(m.m, key) - m.l.Remove(e) - return e.Value.(*linkedMapKVPair).value, true -} - -// len returns the len of the map. -func (m *linkedMap) len() int { - return len(m.m) -} - -// clear removes all elements from the map. -func (m *linkedMap) clear() { - m.m = make(map[string]*list.Element) - m.l = list.New() -} - -// removeOldest removes the oldest key from the map. -func (m *linkedMap) removeOldest() { - e := m.l.Front() - m.l.Remove(e) - delete(m.m, e.Value.(*linkedMapKVPair).key) -} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 152d9ec..65d45a1 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -21,6 +21,8 @@ package grpc import ( "errors" "io" + "math" + "strconv" "sync" "time" @@ -29,11 +31,13 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - "google.golang.org/grpc/transport" ) // StreamHandler defines the handler called by gRPC server to complete the @@ -55,31 +59,20 @@ type StreamDesc struct { // Stream defines the common interface a client or server stream has to satisfy. // -// All errors returned from Stream are compatible with the status package. +// Deprecated: See ClientStream and ServerStream documentation instead. type Stream interface { - // Context returns the context for this stream. + // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context - // SendMsg blocks until it sends m, the stream is done or the stream - // breaks. - // On error, it aborts the stream and returns an RPC status on client - // side. On server side, it simply returns the error to the caller. - // SendMsg is called by generated code. Also Users can call SendMsg - // directly when it is really needed in their use cases. 
- // It's safe to have a goroutine calling SendMsg and another goroutine calling - // recvMsg on the same stream at the same time. - // But it is not safe to call SendMsg on the same stream in different goroutines. + // Deprecated: See ClientStream and ServerStream documentation instead. SendMsg(m interface{}) error - // RecvMsg blocks until it receives a message or the stream is - // done. On client side, it returns io.EOF when the stream is done. On - // any other error, it aborts the stream and returns an RPC status. On - // server side, it simply returns the error to the caller. - // It's safe to have a goroutine calling SendMsg and another goroutine calling - // recvMsg on the same stream at the same time. - // But it is not safe to call RecvMsg on the same stream in different goroutines. + // Deprecated: See ClientStream and ServerStream documentation instead. RecvMsg(m interface{}) error } -// ClientStream defines the interface a client stream has to satisfy. +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. type ClientStream interface { // Header returns the header metadata received from the server if there // is any. It blocks if the metadata is not ready to read. @@ -91,13 +84,38 @@ type ClientStream interface { // CloseSend closes the send direction of the stream. It closes the stream // when non-nil error is met. CloseSend() error - // Stream.SendMsg() may return a non-nil error when something wrong happens sending - // the request. The returned error indicates the status of this sending, not the final - // status of the RPC. + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. // - // Always call Stream.RecvMsg() to drain the stream and get the final - // status, otherwise there could be leaked resources. - Stream + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error } // NewStream creates a new Stream for the client side. 
This is typically @@ -128,8 +146,6 @@ func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method st } // NewClientStream is a wrapper for ClientConn.NewStream. -// -// DEPRECATED: Use ClientConn.NewStream instead. func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { return cc.NewStream(ctx, desc, method, opts...) } @@ -178,13 +194,8 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - // If it's not client streaming, we should already have the request to be sent, - // so we don't flush the header. - // If it's client streaming, the user may never send a request or send it any - // time soon, so we ask the transport to flush the header. - Flush: desc.ClientStreams, + Host: cc.authority, + Method: method, ContentSubtype: c.contentSubtype, } @@ -218,15 +229,6 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } trInfo.tr.LazyLog(&trInfo.firstLine, false) ctx = trace.NewContext(ctx, trInfo.tr) - defer func() { - if err != nil { - // Need to call tr.finish() if error is returned. - // Because tr will not be returned to caller. - trInfo.tr.LazyPrintf("RPC: [%v]", err) - trInfo.tr.SetError() - trInfo.tr.Finish() - } - }() } ctx = newContextWithRPCInfo(ctx, c.failFast) sh := cc.dopts.copts.StatsHandler @@ -240,80 +242,41 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth FailFast: c.failFast, } sh.HandleRPC(ctx, begin) - defer func() { - if err != nil { - // Only handle end stats if err != nil. - end := &stats.End{ - Client: true, - Error: err, - BeginTime: beginTime, - EndTime: time.Now(), - } - sh.HandleRPC(ctx, end) - } - }() } - var ( - t transport.ClientTransport - s *transport.Stream - done func(balancer.DoneInfo) - ) - for { - // Check to make sure the context has expired. This will prevent us from - // looping forever if an error occurs for wait-for-ready RPCs where no data - // is sent on the wire. - select { - case <-ctx.Done(): - return nil, toRPCErr(ctx.Err()) - default: - } - - t, done, err = cc.getTransport(ctx, c.failFast) - if err != nil { - return nil, err - } + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + beginTime: beginTime, + firstAttempt: true, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + + cs.callInfo.stream = cs + // Only this initial attempt has stats/tracing. + // TODO(dfawley): move to newAttempt when per-attempt stats are implemented. + if err := cs.newAttemptLocked(sh, trInfo); err != nil { + cs.finish(err) + return nil, err + } - s, err = t.NewStream(ctx, callHdr) - if err != nil { - if done != nil { - done(balancer.DoneInfo{Err: err}) - done = nil - } - // In the event of any error from NewStream, we never attempted to write - // anything to the wire, so we can retry indefinitely for non-fail-fast - // RPCs. 
- if !c.failFast { - continue - } - return nil, toRPCErr(err) - } - break + op := func(a *csAttempt) error { return a.newStream() } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + cs.finish(err) + return nil, err } - cs := &clientStream{ - opts: opts, - c: c, - cc: cc, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - cancel: cancel, - attempt: &csAttempt{ - t: t, - s: s, - p: &parser{r: s}, - done: done, - dc: cc.dopts.dc, - ctx: ctx, - trInfo: trInfo, - statsHandler: sh, - beginTime: beginTime, - }, - } - cs.c.stream = cs - cs.attempt.cs = cs if desc != unaryStreamDesc { // Listen on cc and stream contexts to cleanup when the user closes the // ClientConn or cancels the stream context. In all other cases, an error @@ -332,12 +295,45 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth return cs, nil } +func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error { + cs.attempt = &csAttempt{ + cs: cs, + dc: cs.cc.dopts.dc, + statsHandler: sh, + trInfo: trInfo, + } + + if err := cs.ctx.Err(); err != nil { + return toRPCErr(err) + } + t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method) + if err != nil { + return err + } + cs.attempt.t = t + cs.attempt.done = done + return nil +} + +func (a *csAttempt) newStream() error { + cs := a.cs + cs.callHdr.PreviousAttempts = cs.numRetries + s, err := a.t.NewStream(cs.ctx, cs.callHdr) + if err != nil { + return toRPCErr(err) + } + cs.attempt.s = s + cs.attempt.p = &parser{r: s} + return nil +} + // clientStream implements a client side Stream. type clientStream struct { - opts []CallOption - c *callInfo - cc *ClientConn - desc *StreamDesc + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc codec baseCodec cp Compressor @@ -345,13 +341,25 @@ type clientStream struct { cancel context.CancelFunc // cancels all attempts - sentLast bool // sent an end stream + sentLast bool // sent an end stream + beginTime time.Time + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing - mu sync.Mutex // guards finished - finished bool // TODO: replace with atomic cmpxchg or sync.Once? + retryThrottler *retryThrottler // The throttler active when the RPC began. - attempt *csAttempt // the active client stream attempt + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + attempt *csAttempt // the active client stream attempt // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer } // csAttempt implements a single transport stream attempt within a @@ -363,53 +371,298 @@ type csAttempt struct { p *parser done func(balancer.DoneInfo) + finished bool dc Decompressor decomp encoding.Compressor decompSet bool - ctx context.Context // the application's context, wrapped by stats/tracing - mu sync.Mutex // guards trInfo.tr // trInfo.tr is set when created (if EnableTracing is true), // and cleared when the finish method is called. 
trInfo traceInfo statsHandler stats.Handler - beginTime time.Time +} + +func (cs *clientStream) commitAttemptLocked() { + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. +func (cs *clientStream) shouldRetry(err error) error { + if cs.attempt.s == nil && !cs.callInfo.failFast { + // In the event of any error from NewStream (attempt.s == nil), we + // never attempted to write anything to the wire, so we can retry + // indefinitely for non-fail-fast RPCs. + return nil + } + if cs.finished || cs.committed { + // RPC is finished or committed; cannot retry. + return err + } + // Wait for the trailers. + if cs.attempt.s != nil { + <-cs.attempt.s.Done() + } + if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) { + // First attempt, wait-for-ready, stream unprocessed: transparently retry. + cs.firstAttempt = false + return nil + } + cs.firstAttempt = false + if cs.cc.dopts.disableRetry { + return err + } + + pushback := 0 + hasPushback := false + if cs.attempt.s != nil { + if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil { + // Context error; stop now. + return toErr + } else if !to { + return err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + hasPushback = true + } else if len(sps) > 1 { + grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return err + } + } + + var code codes.Code + if cs.attempt.s != nil { + code = cs.attempt.s.Status().Code() + } else { + code = status.Convert(err).Code() + } + + rp := cs.methodConfig.retryPolicy + if rp == nil || !rp.retryableStatusCodes[code] { + return err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return err + } + if cs.numRetries+1 >= rp.maxAttempts { + return err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.initialBackoff) * fact + if max := float64(rp.maxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return nil + case <-cs.ctx.Done(): + t.Stop() + return status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. 
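When no server pushback is present, shouldRetry above draws the sleep from an exponentially growing, capped window. A standalone sketch of that formula, using math/rand in place of the internal grpcrand helper:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// retryBackoff returns a random duration in [0, min(initial*multiplier^n, max)),
// where n is the number of retries since the last server pushback.
func retryBackoff(n int, initial, max time.Duration, multiplier float64) time.Duration {
	cur := float64(initial) * math.Pow(multiplier, float64(n))
	if m := float64(max); cur > m {
		cur = m
	}
	return time.Duration(rand.Int63n(int64(cur)))
}

func main() {
	for n := 0; n < 4; n++ {
		fmt.Println(retryBackoff(n, 100*time.Millisecond, time.Second, 2))
	}
}
```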
+func (cs *clientStream) retryLocked(lastErr error) error { + for { + cs.attempt.finish(lastErr) + if err := cs.shouldRetry(lastErr); err != nil { + cs.commitAttemptLocked() + return err + } + if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil { + return err + } + if lastErr = cs.replayBufferLocked(); lastErr == nil { + return nil + } + } } func (cs *clientStream) Context() context.Context { - // TODO(retry): commit the current attempt (the context has peer-aware data). - return cs.attempt.context() + cs.commitAttempt() + // No need to lock before using attempt, since we know it is committed and + // cannot change. + return cs.attempt.s.Context() +} + +func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error { + cs.mu.Lock() + for { + if cs.committed { + cs.mu.Unlock() + return op(cs.attempt) + } + a := cs.attempt + cs.mu.Unlock() + err := op(a) + cs.mu.Lock() + if a != cs.attempt { + // We started another attempt already. + continue + } + if err == io.EOF { + <-a.s.Done() + } + if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + onSuccess() + cs.mu.Unlock() + return err + } + if err := cs.retryLocked(err); err != nil { + cs.mu.Unlock() + return err + } + } } func (cs *clientStream) Header() (metadata.MD, error) { - m, err := cs.attempt.header() + var m metadata.MD + err := cs.withRetry(func(a *csAttempt) error { + var err error + m, err = a.s.Header() + return toRPCErr(err) + }, cs.commitAttemptLocked) if err != nil { - // TODO(retry): maybe retry on error or commit attempt on success. - err = toRPCErr(err) cs.finish(err) } return m, err } func (cs *clientStream) Trailer() metadata.MD { - // TODO(retry): on error, maybe retry (trailers-only). - return cs.attempt.trailer() + // On RPC failure, we never need to retry, because usage requires that + // RecvMsg() returned a non-nil error before calling this function is valid. + // We would have retried earlier if necessary. + // + // Commit the attempt anyway, just in case users are not following those + // directions -- it will prevent races and should not meaningfully impact + // performance. + cs.commitAttempt() + if cs.attempt.s == nil { + return nil + } + return cs.attempt.s.Trailer() +} + +func (cs *clientStream) replayBufferLocked() error { + a := cs.attempt + for _, f := range cs.buffer { + if err := f(a); err != nil { + return err + } + } + return nil +} + +func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) { + // Note: we still will buffer if retry is disabled (for transparent retries). + if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) } func (cs *clientStream) SendMsg(m interface{}) (err error) { - // TODO(retry): buffer message for replaying if not committed. - return cs.attempt.sendMsg(m) + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) 
+ cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + data, err := encode(cs.codec, m) + if err != nil { + return err + } + compData, err := compress(data, cs.cp, cs.comp) + if err != nil { + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + op := func(a *csAttempt) error { + err := a.sendMsg(m, hdr, payload, data) + // nil out the message and uncomp when replaying; they are only needed for + // stats which is disabled for subsequent attempts. + m, data = nil, nil + return err + } + return cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) } -func (cs *clientStream) RecvMsg(m interface{}) (err error) { - // TODO(retry): maybe retry on error or commit attempt on success. - return cs.attempt.recvMsg(m) +func (cs *clientStream) RecvMsg(m interface{}) error { + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m) + }, cs.commitAttemptLocked) + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + return err } func (cs *clientStream) CloseSend() error { - cs.attempt.closeSend() + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { return a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + // We never returned an error here for reasons. return nil } @@ -424,7 +677,11 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + cs.commitAttemptLocked() cs.mu.Unlock() + if err == nil { + cs.retryThrottler.successfulRPC() + } if channelz.IsOn() { if err != nil { cs.cc.incrCallsFailed() @@ -432,46 +689,20 @@ func (cs *clientStream) finish(err error) { cs.cc.incrCallsSucceeded() } } - // TODO(retry): commit current attempt if necessary. - cs.attempt.finish(err) - for _, o := range cs.opts { - o.after(cs.c) + if cs.attempt != nil { + cs.attempt.finish(err) + } + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo) + } } cs.cancel() } -func (a *csAttempt) context() context.Context { - return a.s.Context() -} - -func (a *csAttempt) header() (metadata.MD, error) { - return a.s.Header() -} - -func (a *csAttempt) trailer() metadata.MD { - return a.s.Trailer() -} - -func (a *csAttempt) sendMsg(m interface{}) (err error) { - // TODO Investigate how to signal the stats handling party. - // generate error stats if err != nil && err != io.EOF? +func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { cs := a.cs - defer func() { - // For non-client-streaming RPCs, we return nil instead of EOF on success - // because the generated code requires it. finish is not called; RecvMsg() - // will call it with the stream's status independently. - if err == io.EOF && !cs.desc.ClientStreams { - err = nil - } - if err != nil && err != io.EOF { - // Call finish on the client stream for errors generated by this SendMsg - // call, as these indicate problems created by this client. 
(Transport - // errors are converted to an io.EOF error below; the real error will be - // returned from RecvMsg eventually in that case, or be retried.) - cs.finish(err) - } - }() - // TODO: Check cs.sentLast and error if we already ended the stream. if EnableTracing { a.mu.Lock() if a.trInfo.tr != nil { @@ -479,44 +710,26 @@ func (a *csAttempt) sendMsg(m interface{}) (err error) { } a.mu.Unlock() } - data, err := encode(cs.codec, m) - if err != nil { - return err - } - compData, err := compress(data, cs.cp, cs.comp) - if err != nil { - return err - } - hdr, payload := msgHeader(data, compData) - // TODO(dfawley): should we be checking len(data) instead? - if len(payload) > *cs.c.maxSendMessageSize { - return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.c.maxSendMessageSize) + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF } - - if !cs.desc.ClientStreams { - cs.sentLast = true + if a.statsHandler != nil { + a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now())) } - err = a.t.Write(a.s, hdr, payload, &transport.Options{Last: !cs.desc.ClientStreams}) - if err == nil { - if a.statsHandler != nil { - a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, data, payload, time.Now())) - } - if channelz.IsOn() { - a.t.IncrMsgSent() - } - return nil + if channelz.IsOn() { + a.t.IncrMsgSent() } - return io.EOF + return nil } func (a *csAttempt) recvMsg(m interface{}) (err error) { cs := a.cs - defer func() { - if err != nil || !cs.desc.ServerStreams { - // err != nil or non-server-streaming indicates end of stream. - cs.finish(err) - } - }() var inPayload *stats.InPayload if a.statsHandler != nil { inPayload = &stats.InPayload{ @@ -539,7 +752,7 @@ func (a *csAttempt) recvMsg(m interface{}) (err error) { // Only initialize this state once per stream. a.decompSet = true } - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp) + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, inPayload, a.decomp) if err != nil { if err == io.EOF { if statusErr := a.s.Status().Err(); statusErr != nil { @@ -557,7 +770,7 @@ func (a *csAttempt) recvMsg(m interface{}) (err error) { a.mu.Unlock() } if inPayload != nil { - a.statsHandler.HandleRPC(a.ctx, inPayload) + a.statsHandler.HandleRPC(cs.ctx, inPayload) } if channelz.IsOn() { a.t.IncrMsgRecv() @@ -569,7 +782,7 @@ func (a *csAttempt) recvMsg(m interface{}) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp) + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp) if err == nil { return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) } @@ -579,37 +792,40 @@ func (a *csAttempt) recvMsg(m interface{}) (err error) { return toRPCErr(err) } -func (a *csAttempt) closeSend() { - cs := a.cs - if cs.sentLast { - return - } - cs.sentLast = true - cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true}) - // We ignore errors from Write. 
Any error it would return would also be - // returned by a subsequent RecvMsg call, and the user is supposed to always - // finish the stream by calling RecvMsg until it returns err != nil. -} - func (a *csAttempt) finish(err error) { a.mu.Lock() - a.t.CloseStream(a.s, err) + if a.finished { + a.mu.Unlock() + return + } + a.finished = true + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + if a.s != nil { + a.t.CloseStream(a.s, err) + } if a.done != nil { + br := false + if a.s != nil { + br = a.s.BytesReceived() + } a.done(balancer.DoneInfo{ Err: err, - BytesSent: true, - BytesReceived: a.s.BytesReceived(), + BytesSent: a.s != nil, + BytesReceived: br, }) } if a.statsHandler != nil { end := &stats.End{ Client: true, - BeginTime: a.beginTime, + BeginTime: a.cs.beginTime, EndTime: time.Now(), Error: err, } - a.statsHandler.HandleRPC(a.ctx, end) + a.statsHandler.HandleRPC(a.cs.ctx, end) } if a.trInfo.tr != nil { if err == nil { @@ -624,7 +840,10 @@ func (a *csAttempt) finish(err error) { a.mu.Unlock() } -// ServerStream defines the interface a server stream has to satisfy. +// ServerStream defines the server-side behavior of a streaming RPC. +// +// All errors returned from ServerStream methods are compatible with the +// status package. type ServerStream interface { // SetHeader sets the header metadata. It may be called multiple times. // When call multiple times, all the provided metadata will be merged. @@ -640,7 +859,32 @@ type ServerStream interface { // SetTrailer sets the trailer metadata which will be sent with the RPC status. // When called more than once, all the provided metadata will be merged. SetTrailer(metadata.MD) - Stream + // Context returns the context for this stream. + Context() context.Context + // SendMsg sends a message. On error, SendMsg aborts the stream and the + // error is returned directly. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the client. An + // untimely stream closure may result in lost messages. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the client has performed a CloseSend. On + // any non-EOF error, the stream is aborted and the error contains the + // RPC status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error } // serverStream implements a server side Stream. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 7f124fb..8ee619b 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.13.0" +const Version = "1.15.0" diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/.travis.yml b/vendor/gopkg.in/bblfsh/client-go.v2/.travis.yml index 1a1ab27..480a687 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/.travis.yml +++ b/vendor/gopkg.in/bblfsh/client-go.v2/.travis.yml @@ -1,6 +1,8 @@ language: go go_import_path: gopkg.in/bblfsh/client-go.v2 +env: + - BBLFSHD_VERSION=v2.6.1 BBLFSH_PYTHON_VERSION=v2.2.1 install: - | if [[ $TRAVIS_OS_NAME = linux ]]; then @@ -9,8 +11,8 @@ install: sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-6 g++-6 sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-6 90 sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 90 - docker run --privileged -d -p 9432:9432 --name bblfsh bblfsh/bblfshd - docker exec -it bblfsh bblfshctl driver install python bblfsh/python-driver + docker run --privileged -d -p 9432:9432 --name bblfshd bblfsh/bblfshd:$BBLFSHD_VERSION + docker exec bblfshd bblfshctl driver install bblfsh/python-driver:$BBLFSH_PYTHON_VERSION fi - make dependencies diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/Gopkg.lock b/vendor/gopkg.in/bblfsh/client-go.v2/Gopkg.lock new file mode 100644 index 0000000..82b47db --- /dev/null +++ b/vendor/gopkg.in/bblfsh/client-go.v2/Gopkg.lock @@ -0,0 +1,206 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/BurntSushi/toml" + packages = ["."] + revision = "b26d9c308763d68093482582cea63d69be07a0f0" + version = "v0.3.0" + +[[projects]] + branch = "master" + name = "github.com/antchfx/xpath" + packages = ["."] + revision = "3de91f3991a1af6e495d49c9218318b5544b20e3" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = [ + "gogoproto", + "proto", + "protoc-gen-gogo/descriptor", + "sortkeys", + "types" + ] + revision = "636bf0302bc95575d69441b25a2603156ffdddf1" + version = "v1.1.1" + +[[projects]] + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp" + ] + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" + +[[projects]] + name = "github.com/jessevdk/go-flags" + packages = ["."] + revision = "c6ca198ec95c841fdb89fc0de7496fed11ab854e" + version = "v1.4.0" + +[[projects]] + branch = "master" + name = "github.com/mcuadros/go-lookup" + packages = ["."] + revision = "5650f26be7675b629fff8356a50d906fa03e9c8b" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require" + ] + revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" + version = "v1.2.2" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace" + ] + revision = 
"f4c29de78a2a91c00474a2e689954305c350adf9" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "3dc4335d56c789b04b0ba99b7a37249d9b614314" + +[[projects]] + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable" + ] + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "daca94659cb50e9f37c1b834680f2e46358f10b0" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap" + ] + revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" + version = "v1.14.0" + +[[projects]] + name = "gopkg.in/bblfsh/sdk.v1" + packages = [ + "manifest", + "protocol", + "uast" + ] + revision = "94e3b212553e761677da180f321d9a7a60ebec5f" + version = "v1.16.1" + +[[projects]] + name = "gopkg.in/bblfsh/sdk.v2" + packages = [ + "driver", + "driver/manifest", + "protocol", + "protocol/v1", + "uast", + "uast/nodes", + "uast/nodes/nodesproto", + "uast/nodes/nodesproto/pio", + "uast/query", + "uast/query/xpath", + "uast/role", + "uast/transformer", + "uast/yaml" + ] + revision = "801385dfdc5c955c410e52c4b02f8622c64a7d85" + version = "v2.2.2" + +[[projects]] + name = "gopkg.in/src-d/go-errors.v1" + packages = ["."] + revision = "8bbbeeb767dfdd053b9b45d5a16a4f4ce2c6f694" + version = "v1.0.0" + +[[projects]] + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "403f83976885d8f2dd873c1db6df963e4a5685e217103ac49dca60d0fbd9d472" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/Gopkg.toml b/vendor/gopkg.in/bblfsh/client-go.v2/Gopkg.toml new file mode 100644 index 0000000..c8dfa67 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/client-go.v2/Gopkg.toml @@ -0,0 +1,26 @@ +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. 
+ +[[constraint]] + name = "github.com/jessevdk/go-flags" + version = "1.4.x" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.2.x" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.14.x" + +[[constraint]] + name = "gopkg.in/bblfsh/sdk.v1" + version = "1.16.x" + +[[constraint]] + name = "gopkg.in/bblfsh/sdk.v2" + version = "2.2.x" + +[prune] + go-tests = true + unused-packages = true diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/Makefile b/vendor/gopkg.in/bblfsh/client-go.v2/Makefile index 3cb1c97..a9be9c3 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/Makefile +++ b/vendor/gopkg.in/bblfsh/client-go.v2/Makefile @@ -1,6 +1,6 @@ # Package configuration PROJECT = client-go -LIBUAST_VERSION ?= 1.9.4 +LIBUAST_VERSION ?= 2.0.1 TOOLS_FOLDER = tools @@ -65,4 +65,4 @@ unix-dependencies: curl -SL https://github.com/bblfsh/libuast/archive/v$(LIBUAST_VERSION).tar.gz | tar xz mv libuast-$(LIBUAST_VERSION)/src/* $(TOOLS_FOLDER)/. rm -rf libuast-$(LIBUAST_VERSION) -.PHONY: unix-dependencies \ No newline at end of file +.PHONY: unix-dependencies diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/README.md b/vendor/gopkg.in/bblfsh/client-go.v2/README.md index e373b38..1eae31c 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/README.md +++ b/vendor/gopkg.in/bblfsh/client-go.v2/README.md @@ -20,7 +20,7 @@ It is also possible to link against custom `libuast` on Windows, read [WINDOWS.m This small example illustrates how to retrieve the [UAST](https://doc.bblf.sh/uast/specification.html) from a small Python script. -If you don't have a bblfsh server installed, please read the [getting started](https://doc.bblf.sh/user/getting-started.html) guide, to learn more about how to use and deploy a bblfsh server. +If you don't have a bblfsh server installed, please read the [getting started](https://doc.bblf.sh/using-babelfish/getting-started.html) guide, to learn more about how to use and deploy a bblfsh server. Go to the[quick start](https://github.com/bblfsh/bblfshd#quick-start) to discover how to run Babelfish with Docker. @@ -94,7 +94,7 @@ strres, err := FilterString(res.UAST, "name(//*[1])") numres, err := FilterNumber(res.UAST, "count(//*)") ``` -Please read the [Babelfish clients](https://doc.bblf.sh/user/language-clients.html) guide section to learn more about babelfish clients and their query language. +Please read the [Babelfish clients](https://doc.bblf.sh/using-babelfish/clients.html) guide section to learn more about babelfish clients and their query language. ## License diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/client.go b/vendor/gopkg.in/bblfsh/client-go.v2/client.go index 8162ace..bac9d6f 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/client.go +++ b/vendor/gopkg.in/bblfsh/client-go.v2/client.go @@ -1,44 +1,57 @@ package bblfsh import ( + "context" "time" "google.golang.org/grpc" - "gopkg.in/bblfsh/sdk.v1/protocol" + protocol1 "gopkg.in/bblfsh/sdk.v1/protocol" + protocol2 "gopkg.in/bblfsh/sdk.v2/protocol" ) // Client holds the public client API to interact with the bblfsh daemon. type Client struct { *grpc.ClientConn - service protocol.ProtocolServiceClient + service1 protocol1.ProtocolServiceClient + service2 protocol2.DriverClient } -// NewClient returns a new bblfsh client given a bblfshd endpoint. -func NewClient(endpoint string) (*Client, error) { +// NewClientContext returns a new bblfsh client given a bblfshd endpoint. 
+func NewClientContext(ctx context.Context, endpoint string) (*Client, error) { opts := []grpc.DialOption{ - grpc.WithTimeout(5 * time.Second), grpc.WithBlock(), grpc.WithInsecure(), } - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.DialContext(ctx, endpoint, opts...) if err != nil { return nil, err } - return &Client{ - ClientConn: conn, - service: protocol.NewProtocolServiceClient(conn), - }, nil + return NewClientWithConnection(conn) +} + +// NewClient is the same as NewClientContext, but assumes a default timeout for the connection. +func NewClient(endpoint string) (*Client, error) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + return NewClientContext(ctx, endpoint) } // NewClientWithConnection returns a new bblfsh client given a grpc connection. func NewClientWithConnection(conn *grpc.ClientConn) (*Client, error) { return &Client{ ClientConn: conn, - service: protocol.NewProtocolServiceClient(conn), + service1: protocol1.NewProtocolServiceClient(conn), + service2: protocol2.NewDriverClient(conn), }, nil } +// NewParseRequestV2 is a parsing request to get the UAST. +func (c *Client) NewParseRequestV2() *ParseRequestV2 { + return &ParseRequestV2{client: c} +} + // NewParseRequest is a parsing request to get the UAST. func (c *Client) NewParseRequest() *ParseRequest { return &ParseRequest{client: c} diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/request.go b/vendor/gopkg.in/bblfsh/client-go.v2/request.go index d8f284c..def9034 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/request.go +++ b/vendor/gopkg.in/bblfsh/client-go.v2/request.go @@ -2,11 +2,17 @@ package bblfsh import ( "context" + "fmt" "io/ioutil" "path/filepath" "strings" + "time" - "gopkg.in/bblfsh/sdk.v1/protocol" + protocol1 "gopkg.in/bblfsh/sdk.v1/protocol" + "gopkg.in/bblfsh/sdk.v2/driver" + protocol2 "gopkg.in/bblfsh/sdk.v2/protocol" + "gopkg.in/bblfsh/sdk.v2/protocol/v1" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" ) // FatalError is returned when response is returned with Fatal status code. @@ -19,9 +25,125 @@ func (e FatalError) Error() string { return strings.Join([]string(e), "\n") } +// ErrPartialParse is returned when driver was not able to parse the whole source file. +type ErrPartialParse = driver.ErrPartialParse + +// ParseRequestV2 is a parsing request to get the UAST. +type ParseRequestV2 struct { + internal protocol2.ParseRequest + client *Client + err error +} + +// Language sets the language of the given source file to parse. if missing +// will be guess from the filename and the content. +func (r *ParseRequestV2) Language(language string) *ParseRequestV2 { + r.internal.Language = language + return r +} + +// ReadFile loads a file given a local path and sets the content and the +// filename of the request. +func (r *ParseRequestV2) ReadFile(fp string) *ParseRequestV2 { + data, err := ioutil.ReadFile(fp) + if err != nil { + r.err = err + } else { + r.internal.Content = string(data) + r.internal.Filename = filepath.Base(fp) + } + + return r +} + +// Content sets the content of the parse request. It should be the source code +// that wants to be parsed. +func (r *ParseRequestV2) Content(content string) *ParseRequestV2 { + r.internal.Content = content + return r +} + +// Filename sets the filename of the content. +func (r *ParseRequestV2) Filename(filename string) *ParseRequestV2 { + r.internal.Filename = filename + return r +} + +// Mode controls the level of transformation applied to UAST. 
+type Mode = protocol2.Mode + +const ( + Native = protocol2.Mode_Native + Annotated = protocol2.Mode_Annotated + Semantic = protocol2.Mode_Semantic +) + +// Parse mode parses a UAST mode string to an enum value. +func ParseMode(mode string) (Mode, error) { + // TODO: define this function in SDK + switch mode { + case "native": + return Native, nil + case "annotated": + return Annotated, nil + case "semantic": + return Semantic, nil + } + return 0, fmt.Errorf("unsupported mode: %q", mode) +} + +// Mode controls the level of transformation applied to UAST. +func (r *ParseRequestV2) Mode(mode Mode) *ParseRequestV2 { + r.internal.Mode = mode + return r +} + +// Do performs the actual parsing by serializing the request, sending it to +// bblfshd and waiting for the response. +func (r *ParseRequestV2) Do() (*protocol2.ParseResponse, error) { + return r.DoContext(context.Background()) +} + +// DoContext does the same as Do(), but supports cancellation by the use of Go contexts. +func (r *ParseRequestV2) DoContext(ctx context.Context) (*protocol2.ParseResponse, error) { + if r.err != nil { + return nil, r.err + } + + resp, err := r.client.service2.Parse(ctx, &r.internal) + if err != nil { + return nil, err + } + + return resp, nil +} + +// UAST is the same as UASTContext, but uses context.Background as a context. +func (r *ParseRequestV2) UAST() (nodes.Node, string, error) { + return r.UASTContext(context.Background()) +} + +// UASTContext send the request and returns decoded UAST and the language. +// If a file contains syntax error, the ErrPartialParse is returned and will contain a partial AST. +func (r *ParseRequestV2) UASTContext(ctx context.Context) (nodes.Node, string, error) { + if r.err != nil { + return nil, "", r.err + } + resp, err := r.client.service2.Parse(ctx, &r.internal) + if err != nil { + return nil, "", err + } + ast, err := resp.Nodes() + if err != nil { + return nil, resp.Language, err + } + return ast, resp.Language, nil +} + // ParseRequest is a parsing request to get the UAST. type ParseRequest struct { - internal protocol.ParseRequest + internal protocol1.ParseRequest + mode *Mode // if set, switches to v2 protocol and downgrades the response client *Client err error } @@ -61,36 +183,102 @@ func (r *ParseRequest) Filename(filename string) *ParseRequest { } // Encoding sets the text encoding of the content. -func (r *ParseRequest) Encoding(encoding protocol.Encoding) *ParseRequest { +func (r *ParseRequest) Encoding(encoding protocol1.Encoding) *ParseRequest { r.internal.Encoding = encoding return r } +// Mode controls the level of transformation applied to UAST. +func (r *ParseRequest) Mode(mode Mode) *ParseRequest { + r.mode = &mode + return r +} + // Do performs the actual parsing by serializing the request, sending it to // bblfshd and waiting for the response. -func (r *ParseRequest) Do() (*protocol.ParseResponse, error) { +func (r *ParseRequest) Do() (*protocol1.ParseResponse, error) { return r.DoWithContext(context.Background()) } // DoWithContext does the same as Do(), but sopporting cancellation by the use // of Go contexts. 
-func (r *ParseRequest) DoWithContext(ctx context.Context) (*protocol.ParseResponse, error) { +func (r *ParseRequest) DoWithContext(ctx context.Context) (*protocol1.ParseResponse, error) { if r.err != nil { return nil, r.err } + if r.mode != nil { + return r.doV2(ctx) + } - resp, err := r.client.service.Parse(ctx, &r.internal) + resp, err := r.client.service1.Parse(ctx, &r.internal) if err != nil { return nil, err - } else if resp.Status == protocol.Fatal { + } else if resp.Status == protocol1.Fatal { return resp, FatalError(resp.Errors) } return resp, nil } +// doV2 converts v1 request to v2, send the request including the "mode" parameter and +// convert the response back to v1 format. +func (r *ParseRequest) doV2(ctx context.Context) (*protocol1.ParseResponse, error) { + start := time.Now() + astV2, lang, err := r.UASTContext(ctx) + if err != nil { + return nil, err + } + astV1, err := uast1.ToNode(astV2) + if err != nil { + return nil, fmt.Errorf("cannot convert to v1 uast: %v", err) + } + out := &protocol1.ParseResponse{ + Language: lang, + Filename: r.internal.Filename, + UAST: astV1, + } + out.Status = protocol1.Ok + out.Elapsed = time.Since(start) + return out, nil +} + +// UAST is the same as UASTContext, but uses context.Background as a context. +func (r *ParseRequest) UAST() (nodes.Node, string, error) { + return r.UASTContext(context.Background()) +} + +// UASTContext send the request and returns decoded UAST and the language. +// If a file contains syntax error, the ErrPartialParse is returned and will contain a partial AST. +func (r *ParseRequest) UASTContext(ctx context.Context) (nodes.Node, string, error) { + if r.err != nil { + return nil, "", r.err + } + if r.internal.Timeout > 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, r.internal.Timeout) + defer cancel() + } + req := &protocol2.ParseRequest{ + Filename: r.internal.Filename, + Language: r.internal.Language, + Content: r.internal.Content, + } + if r.mode != nil { + req.Mode = *r.mode + } + resp, err := r.client.service2.Parse(ctx, req) + if err != nil { + return nil, "", err + } + ast, err := resp.Nodes() + if err != nil { + return nil, resp.Language, fmt.Errorf("cannot decode the uast: %v", err) + } + return ast, resp.Language, nil +} + // NativeParseRequest is a parsing request to get the AST. type NativeParseRequest struct { - internal protocol.NativeParseRequest + internal protocol1.NativeParseRequest client *Client err error } @@ -130,28 +318,28 @@ func (r *NativeParseRequest) Filename(filename string) *NativeParseRequest { } // Encoding sets the text encoding of the content. -func (r *NativeParseRequest) Encoding(encoding protocol.Encoding) *NativeParseRequest { +func (r *NativeParseRequest) Encoding(encoding protocol1.Encoding) *NativeParseRequest { r.internal.Encoding = encoding return r } // Do performs the actual parsing by serializing the request, sending it to // bblfsd and waiting for the response. -func (r *NativeParseRequest) Do() (*protocol.NativeParseResponse, error) { +func (r *NativeParseRequest) Do() (*protocol1.NativeParseResponse, error) { return r.DoWithContext(context.Background()) } // DoWithContext does the same as Do(), but sopporting cancellation by the use // of Go contexts. 
-func (r *NativeParseRequest) DoWithContext(ctx context.Context) (*protocol.NativeParseResponse, error) { +func (r *NativeParseRequest) DoWithContext(ctx context.Context) (*protocol1.NativeParseResponse, error) { if r.err != nil { return nil, r.err } - resp, err := r.client.service.NativeParse(ctx, &r.internal) + resp, err := r.client.service1.NativeParse(ctx, &r.internal) if err != nil { return nil, err - } else if resp.Status == protocol.Fatal { + } else if resp.Status == protocol1.Fatal { return resp, FatalError(resp.Errors) } return resp, nil @@ -165,21 +353,21 @@ type VersionRequest struct { // Do performs the actual parsing by serializing the request, sending it to // bblfsd and waiting for the response. -func (r *VersionRequest) Do() (*protocol.VersionResponse, error) { +func (r *VersionRequest) Do() (*protocol1.VersionResponse, error) { return r.DoWithContext(context.Background()) } // DoWithContext does the same as Do(), but sopporting cancellation by the use // of Go contexts. -func (r *VersionRequest) DoWithContext(ctx context.Context) (*protocol.VersionResponse, error) { +func (r *VersionRequest) DoWithContext(ctx context.Context) (*protocol1.VersionResponse, error) { if r.err != nil { return nil, r.err } - resp, err := r.client.service.Version(ctx, &protocol.VersionRequest{}) + resp, err := r.client.service1.Version(ctx, &protocol1.VersionRequest{}) if err != nil { return nil, err - } else if resp.Status == protocol.Fatal { + } else if resp.Status == protocol1.Fatal { return resp, FatalError(resp.Errors) } return resp, nil @@ -193,21 +381,21 @@ type SupportedLanguagesRequest struct { // Do performs the actual parsing by serializing the request, sending it to // bblfsd and waiting for the response. -func (r *SupportedLanguagesRequest) Do() (*protocol.SupportedLanguagesResponse, error) { +func (r *SupportedLanguagesRequest) Do() (*protocol1.SupportedLanguagesResponse, error) { return r.DoWithContext(context.Background()) } // DoWithContext does the same as Do(), but sopporting cancellation by the use // of Go contexts. 
-func (r *SupportedLanguagesRequest) DoWithContext(ctx context.Context) (*protocol.SupportedLanguagesResponse, error) { +func (r *SupportedLanguagesRequest) DoWithContext(ctx context.Context) (*protocol1.SupportedLanguagesResponse, error) { if r.err != nil { return nil, r.err } - resp, err := r.client.service.SupportedLanguages(ctx, &protocol.SupportedLanguagesRequest{}) + resp, err := r.client.service1.SupportedLanguages(ctx, &protocol1.SupportedLanguagesRequest{}) if err != nil { return nil, err - } else if resp.Status == protocol.Fatal { + } else if resp.Status == protocol1.Fatal { return resp, FatalError(resp.Errors) } return resp, nil diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/tools/bindings.go b/vendor/gopkg.in/bblfsh/client-go.v2/tools/bindings.go index 044320a..e0c5614 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/tools/bindings.go +++ b/vendor/gopkg.in/bblfsh/client-go.v2/tools/bindings.go @@ -5,6 +5,7 @@ import ( "sort" "strings" "sync" + "sync/atomic" "unsafe" "gopkg.in/bblfsh/sdk.v1/uast" @@ -47,11 +48,80 @@ import ( import "C" var ( - findMutex sync.Mutex - spool cstringPool - kpool = make(map[*uast.Node][]string) + lastHandle handle // atomic + + ctxmu sync.RWMutex + ctxes = make(map[*C.Uast]*Context) + + global struct { + sync.Mutex + ctx *Context + } ) +func init() { + global.ctx = NewContext() +} + +type handle uint64 + +func nextHandle() handle { + return handle(atomic.AddUint64((*uint64)(&lastHandle), 1)) +} + +func freeString(s *C.char) { + C.free(unsafe.Pointer(s)) +} + +// NewContext creates a new query context. Caller should close the context to release resources. +func NewContext() *Context { + c := &Context{ + ctx: C.CreateUast(), + nodes: make(map[handle]*uast.Node), + keys: make(map[*uast.Node][]string), + } + ctxmu.Lock() + ctxes[c.ctx] = c + ctxmu.Unlock() + return c +} + +func getCtx(ctx *C.Uast) *Context { + ctxmu.RLock() + c := ctxes[ctx] + ctxmu.RUnlock() + return c +} + +type Context struct { + ctx *C.Uast + spool cstringPool + nodes map[handle]*uast.Node + keys map[*uast.Node][]string +} + +func (c *Context) cstring(s string) *C.char { + return c.spool.getCstring(s) +} + +func (c *Context) reset() { + c.spool.release() + c.nodes = make(map[handle]*uast.Node) + c.keys = make(map[*uast.Node][]string) +} +func (c *Context) Close() error { + if c.ctx == nil { + return nil + } + c.reset() + ctxmu.Lock() + delete(ctxes, c.ctx) + ctxmu.Unlock() + C.UastFree(c.ctx) + c.ctx = nil + return nil +} + type ErrInvalidArgument struct { Message string } @@ -96,40 +166,40 @@ const ( // Iterator allows for traversal over a UAST tree. type Iterator struct { + c *Context root *uast.Node - iterPtr C.uintptr_t + iterPtr *C.UastIterator finished bool } -func init() { - C.CreateUast() +func (c *Context) nodeToHandleC(node *uast.Node) C.NodeHandle { + return C.NodeHandle(c.nodeToHandle(node)) } - -func nodeToPtr(node *uast.Node) C.uintptr_t { - return C.uintptr_t(uintptr(unsafe.Pointer(node))) +func (c *Context) nodeToHandle(node *uast.Node) handle { + if c == nil || node == nil { + return 0 + } + h := nextHandle() + c.nodes[h] = node + return h } -func ptrToNode(ptr C.uintptr_t) *uast.Node { - return (*uast.Node)(unsafe.Pointer(uintptr(ptr))) +func (c *Context) handleToNodeC(h C.NodeHandle) *uast.Node { + return c.handleToNode(handle(h)) } - -// initFilter converts the query string and node pointer to C types. It acquires findMutex -// and initializes the string pool. The caller should defer returned function to release -// the resources. 
-func initFilter(node *uast.Node, xpath string) (*C.char, C.uintptr_t, func()) { - findMutex.Lock() - cquery := spool.getCstring(xpath) - ptr := nodeToPtr(node) - - return cquery, ptr, func() { - spool.release() - kpool = make(map[*uast.Node][]string) - findMutex.Unlock() +func (c *Context) handleToNode(h handle) *uast.Node { + if c == nil || h == 0 { + return nil } + n, ok := c.nodes[h] + if !ok { + panic(fmt.Errorf("unknown handle: %x", h)) + } + return n } func cError(name string) error { - e := C.Error() + e := C.LastError() msg := strings.TrimSpace(C.GoString(e)) C.free(unsafe.Pointer(e)) // TODO: find a way to access this error code or constant @@ -139,170 +209,284 @@ func cError(name string) error { return &errInternal{Method: name, Message: msg} } +var filterMu sync.Mutex + +func (c *Context) runFilter(fnc func()) { + // TODO: find a way to create XPath context objects + filterMu.Lock() + defer filterMu.Unlock() + fnc() + c.reset() +} + // Filter takes a `*uast.Node` and a xpath query and filters the tree, // returning the list of nodes that satisfy the given query. // Filter is thread-safe but not concurrent by an internal global lock. +// +// Deprecated: use Context.Filter func Filter(node *uast.Node, xpath string) ([]*uast.Node, error) { - if len(xpath) == 0 || node == nil { - return nil, nil - } - - cquery, ptr, closer := initFilter(node, xpath) - defer closer() + global.Lock() + defer global.Unlock() + return global.ctx.Filter(node, xpath) +} - if !C.Filter(ptr, cquery) { - return nil, cError("UastFilter") +// Filter takes a `*uast.Node` and a xpath query and filters the tree, +// returning the list of nodes that satisfy the given query. +// Filter is thread-safe but not concurrent by an internal global lock. +func (c *Context) Filter(node *uast.Node, xpath string) (out []*uast.Node, err error) { + if len(xpath) == 0 || node == nil { + return } + c.runFilter(func() { + cquery := C.CString(xpath) + nodes := C.UastFilter(c.ctx, c.nodeToHandleC(node), cquery) + freeString(cquery) + + if nodes == nil { + err = cError("UastFilter") + return + } + defer C.NodesFree(nodes) - nu := int(C.Size()) - results := make([]*uast.Node, nu) - for i := 0; i < nu; i++ { - results[i] = ptrToNode(C.At(C.int(i))) - } - return results, nil + n := int(C.NodesSize(nodes)) + out = make([]*uast.Node, n) + for i := 0; i < n; i++ { + h := C.NodeAt(nodes, C.int(i)) + out[i] = c.handleToNodeC(h) + } + }) + return } // FilterBool takes a `*uast.Node` and a xpath query with a boolean // return type (e.g. when using XPath functions returning a boolean type). // FilterBool is thread-safe but not concurrent by an internal global lock. +// +// Deprecated: use Context.FilterBool func FilterBool(node *uast.Node, xpath string) (bool, error) { - if len(xpath) == 0 || node == nil { - return false, nil - } - - cquery, ptr, closer := initFilter(node, xpath) - defer closer() - - res := C.FilterBool(ptr, cquery) - if res < 0 { - return false, cError("UastFilterBool") - } + global.Lock() + defer global.Unlock() + return global.ctx.FilterBool(node, xpath) +} - var gores bool - if res == 0 { - gores = false - } else if res == 1 { - gores = true - } else { - panic("Implementation error on FilterBool") +// FilterBool takes a `*uast.Node` and a xpath query with a boolean +// return type (e.g. when using XPath functions returning a boolean type). +// FilterBool is thread-safe but not concurrent by an internal global lock. 
+func (c *Context) FilterBool(node *uast.Node, xpath string) (out bool, err error) { + if len(xpath) == 0 || node == nil { + return } - - return gores, nil + c.runFilter(func() { + var ( + ok C.bool + cquery = C.CString(xpath) + ) + res := C.UastFilterBool(c.ctx, c.nodeToHandleC(node), cquery, &ok) + freeString(cquery) + if !bool(ok) { + err = cError("UastFilterBool") + return + } + out = bool(res) + }) + return } -// FilterBool takes a `*uast.Node` and a xpath query with a float +// FilterNumber takes a `*uast.Node` and a xpath query with a float // return type (e.g. when using XPath functions returning a float type). // FilterNumber is thread-safe but not concurrent by an internal global lock. +// +// Deprecated: use Context.FilterNumber func FilterNumber(node *uast.Node, xpath string) (float64, error) { - if len(xpath) == 0 || node == nil { - return 0, nil - } - - cquery, ptr, closer := initFilter(node, xpath) - defer closer() + global.Lock() + defer global.Unlock() + return global.ctx.FilterNumber(node, xpath) +} - var ok C.int - res := C.FilterNumber(ptr, cquery, &ok) - if ok == 0 { - return 0.0, cError("UastFilterNumber") +// FilterNumber takes a `*uast.Node` and a xpath query with a float +// return type (e.g. when using XPath functions returning a float type). +// FilterNumber is thread-safe but not concurrent by an internal global lock. +func (c *Context) FilterNumber(node *uast.Node, xpath string) (out float64, err error) { + if len(xpath) == 0 || node == nil { + return } - - return float64(res), nil + c.runFilter(func() { + var ( + ok C.bool + cquery = C.CString(xpath) + ) + res := C.UastFilterNumber(c.ctx, c.nodeToHandleC(node), cquery, &ok) + freeString(cquery) + if !bool(ok) { + err = cError("UastFilterNumber") + return + } + out = float64(res) + }) + return } // FilterString takes a `*uast.Node` and a xpath query with a string // return type (e.g. when using XPath functions returning a string type). // FilterString is thread-safe but not concurrent by an internal global lock. +// +// Deprecated: use Context.FilterString func FilterString(node *uast.Node, xpath string) (string, error) { + global.Lock() + defer global.Unlock() + return global.ctx.FilterString(node, xpath) +} + +// FilterString takes a `*uast.Node` and a xpath query with a string +// return type (e.g. when using XPath functions returning a string type). +// FilterString is thread-safe but not concurrent by an internal global lock. 
+func (c *Context) FilterString(node *uast.Node, xpath string) (out string, err error) { if len(xpath) == 0 || node == nil { - return "", nil + return } + c.runFilter(func() { + var ( + res *C.char + cquery = C.CString(xpath) + ) + res = C.UastFilterString(c.ctx, c.nodeToHandleC(node), cquery) + freeString(cquery) + if res == nil { + err = cError("UastFilterString") + return + } + out = C.GoString(res) + }) + return +} - cquery, ptr, closer := initFilter(node, xpath) - defer closer() - - var res *C.char - res = C.FilterString(ptr, cquery) - if res == nil { - return "", cError("UastFilterString") +func (c *Context) getPropertyKeys(node *uast.Node) []string { + if keys, ok := c.keys[node]; ok { + return keys } - - return C.GoString(res), nil + p := node.Properties + keys := make([]string, 0, len(p)) + for k := range p { + keys = append(keys, k) + } + sort.Strings(keys) + c.keys[node] = keys + return keys } //export goGetInternalType -func goGetInternalType(ptr C.uintptr_t) *C.char { - return spool.getCstring(ptrToNode(ptr).InternalType) +func goGetInternalType(ctx *C.Uast, ptr C.NodeHandle) *C.char { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return nil + } + return c.cstring(n.InternalType) } //export goGetToken -func goGetToken(ptr C.uintptr_t) *C.char { - return spool.getCstring(ptrToNode(ptr).Token) +func goGetToken(ctx *C.Uast, ptr C.NodeHandle) *C.char { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return nil + } + return c.cstring(n.Token) } //export goGetChildrenSize -func goGetChildrenSize(ptr C.uintptr_t) C.int { - return C.int(len(ptrToNode(ptr).Children)) +func goGetChildrenSize(ctx *C.Uast, ptr C.NodeHandle) C.int { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + return C.int(len(n.Children)) } //export goGetChild -func goGetChild(ptr C.uintptr_t, index C.int) C.uintptr_t { - child := ptrToNode(ptr).Children[int(index)] - return nodeToPtr(child) +func goGetChild(ctx *C.Uast, ptr C.NodeHandle, index C.int) C.NodeHandle { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + child := n.Children[int(index)] + return c.nodeToHandleC(child) } //export goGetRolesSize -func goGetRolesSize(ptr C.uintptr_t) C.int { - return C.int(len(ptrToNode(ptr).Roles)) +func goGetRolesSize(ctx *C.Uast, ptr C.NodeHandle) C.int { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + return C.int(len(n.Roles)) } //export goGetRole -func goGetRole(ptr C.uintptr_t, index C.int) C.uint16_t { - role := ptrToNode(ptr).Roles[int(index)] +func goGetRole(ctx *C.Uast, ptr C.NodeHandle, index C.int) C.uint16_t { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + role := n.Roles[int(index)] return C.uint16_t(role) } //export goGetPropertiesSize -func goGetPropertiesSize(ptr C.uintptr_t) C.int { - return C.int(len(ptrToNode(ptr).Properties)) -} - -func getPropertyKeys(ptr C.uintptr_t) []string { - node := ptrToNode(ptr) - if keys, ok := kpool[node]; ok { - return keys - } - p := node.Properties - keys := make([]string, 0, len(p)) - for k := range p { - keys = append(keys, k) +func goGetPropertiesSize(ctx *C.Uast, ptr C.NodeHandle) C.int { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 } - sort.Strings(keys) - kpool[node] = keys - return keys + return C.int(len(n.Properties)) } //export goGetPropertyKey -func goGetPropertyKey(ptr C.uintptr_t, index C.int) *C.char { - keys := getPropertyKeys(ptr) - return 
spool.getCstring(keys[int(index)]) +func goGetPropertyKey(ctx *C.Uast, ptr C.NodeHandle, index C.int) *C.char { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return nil + } + keys := c.getPropertyKeys(n) + return c.cstring(keys[int(index)]) } //export goGetPropertyValue -func goGetPropertyValue(ptr C.uintptr_t, index C.int) *C.char { - keys := getPropertyKeys(ptr) - p := ptrToNode(ptr).Properties - return spool.getCstring(p[keys[int(index)]]) +func goGetPropertyValue(ctx *C.Uast, ptr C.NodeHandle, index C.int) *C.char { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return nil + } + keys := c.getPropertyKeys(n) + p := n.Properties + return c.cstring(p[keys[int(index)]]) } //export goHasStartOffset -func goHasStartOffset(ptr C.uintptr_t) C.bool { - return ptrToNode(ptr).StartPosition != nil +func goHasStartOffset(ctx *C.Uast, ptr C.NodeHandle) C.bool { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return false + } + return n.StartPosition != nil } //export goGetStartOffset -func goGetStartOffset(ptr C.uintptr_t) C.uint32_t { - p := ptrToNode(ptr).StartPosition +func goGetStartOffset(ctx *C.Uast, ptr C.NodeHandle) C.uint32_t { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + p := n.StartPosition if p != nil { return C.uint32_t(p.Offset) } @@ -310,13 +494,23 @@ func goGetStartOffset(ptr C.uintptr_t) C.uint32_t { } //export goHasStartLine -func goHasStartLine(ptr C.uintptr_t) C.bool { - return ptrToNode(ptr).StartPosition != nil +func goHasStartLine(ctx *C.Uast, ptr C.NodeHandle) C.bool { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return false + } + return n.StartPosition != nil } //export goGetStartLine -func goGetStartLine(ptr C.uintptr_t) C.uint32_t { - p := ptrToNode(ptr).StartPosition +func goGetStartLine(ctx *C.Uast, ptr C.NodeHandle) C.uint32_t { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + p := n.StartPosition if p != nil { return C.uint32_t(p.Line) } @@ -324,13 +518,23 @@ func goGetStartLine(ptr C.uintptr_t) C.uint32_t { } //export goHasStartCol -func goHasStartCol(ptr C.uintptr_t) C.bool { - return ptrToNode(ptr).StartPosition != nil +func goHasStartCol(ctx *C.Uast, ptr C.NodeHandle) C.bool { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return false + } + return n.StartPosition != nil } //export goGetStartCol -func goGetStartCol(ptr C.uintptr_t) C.uint32_t { - p := ptrToNode(ptr).StartPosition +func goGetStartCol(ctx *C.Uast, ptr C.NodeHandle) C.uint32_t { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + p := n.StartPosition if p != nil { return C.uint32_t(p.Col) } @@ -338,13 +542,23 @@ func goGetStartCol(ptr C.uintptr_t) C.uint32_t { } //export goHasEndOffset -func goHasEndOffset(ptr C.uintptr_t) C.bool { - return ptrToNode(ptr).EndPosition != nil +func goHasEndOffset(ctx *C.Uast, ptr C.NodeHandle) C.bool { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return false + } + return n.EndPosition != nil } //export goGetEndOffset -func goGetEndOffset(ptr C.uintptr_t) C.uint32_t { - p := ptrToNode(ptr).EndPosition +func goGetEndOffset(ctx *C.Uast, ptr C.NodeHandle) C.uint32_t { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + p := n.EndPosition if p != nil { return C.uint32_t(p.Offset) } @@ -352,13 +566,23 @@ func goGetEndOffset(ptr C.uintptr_t) C.uint32_t { } //export goHasEndLine -func goHasEndLine(ptr C.uintptr_t) C.bool { - return 
ptrToNode(ptr).EndPosition != nil +func goHasEndLine(ctx *C.Uast, ptr C.NodeHandle) C.bool { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return false + } + return n.EndPosition != nil } //export goGetEndLine -func goGetEndLine(ptr C.uintptr_t) C.uint32_t { - p := ptrToNode(ptr).EndPosition +func goGetEndLine(ctx *C.Uast, ptr C.NodeHandle) C.uint32_t { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + p := n.EndPosition if p != nil { return C.uint32_t(p.Line) } @@ -366,13 +590,23 @@ func goGetEndLine(ptr C.uintptr_t) C.uint32_t { } //export goHasEndCol -func goHasEndCol(ptr C.uintptr_t) C.bool { - return ptrToNode(ptr).EndPosition != nil +func goHasEndCol(ctx *C.Uast, ptr C.NodeHandle) C.bool { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return false + } + return n.EndPosition != nil } //export goGetEndCol -func goGetEndCol(ptr C.uintptr_t) C.uint32_t { - p := ptrToNode(ptr).EndPosition +func goGetEndCol(ctx *C.Uast, ptr C.NodeHandle) C.uint32_t { + c := getCtx(ctx) + n := c.handleToNodeC(ptr) + if n == nil { + return 0 + } + p := n.EndPosition if p != nil { return C.uint32_t(p.Col) } @@ -384,16 +618,26 @@ func goGetEndCol(ptr C.uintptr_t) C.uint32_t { // the iteration have finished or you don't need the iterator anymore you must // dispose it with the Dispose() method (or call it with `defer`). func NewIterator(node *uast.Node, order TreeOrder) (*Iterator, error) { + global.Lock() + defer global.Unlock() + return global.ctx.NewIterator(node, order) +} + +// NewIterator constructs a new Iterator starting from the given `Node` and +// iterating with the traversal strategy given by the `order` parameter. Once +// the iteration have finished or you don't need the iterator anymore you must +// dispose it with the Dispose() method (or call it with `defer`). +func (c *Context) NewIterator(node *uast.Node, order TreeOrder) (*Iterator, error) { itMutex.Lock() defer itMutex.Unlock() - ptr := nodeToPtr(node) - it := C.IteratorNew(ptr, C.int(order)) - if it == 0 { + it := C.UastIteratorNew(c.ctx, c.nodeToHandleC(node), C.TreeOrder(int(order))) + if it == nil { return nil, cError("UastIteratorNew") } return &Iterator{ + c: c, root: node, iterPtr: it, finished: false, @@ -403,40 +647,39 @@ func NewIterator(node *uast.Node, order TreeOrder) (*Iterator, error) { // Next retrieves the next `Node` in the tree's traversal or `nil` if there are no more // nodes. Calling `Next()` on a finished iterator after the first `nil` will // return an error.This is thread-safe but not concurrent by an internal global lock. -func (i *Iterator) Next() (*uast.Node, error) { - itMutex.Lock() - defer itMutex.Unlock() - - if i.finished { +func (it *Iterator) Next() (*uast.Node, error) { + if it.finished { return nil, fmt.Errorf("Next() called on finished iterator") } - pnode := C.IteratorNext(i.iterPtr) - if pnode == 0 { + itMutex.Lock() + defer itMutex.Unlock() + + h := handle(C.UastIteratorNext(it.iterPtr)) + if h == 0 { // End of the iteration - i.finished = true + it.finished = true return nil, nil } - return ptrToNode(pnode), nil + return it.c.handleToNode(h), nil } // Iterate function is similar to Next() but returns the `Node`s in a channel. It's mean // to be used with the `for node := range myIter.Iterate() {}` loop. 
-func (i *Iterator) Iterate() <-chan *uast.Node { +func (it *Iterator) Iterate() <-chan *uast.Node { c := make(chan *uast.Node) - if i.finished { + if it.finished { close(c) return c } go func() { + defer close(c) for { - n, err := i.Next() + n, err := it.Next() if n == nil || err != nil { - close(c) - break + return } - c <- n } }() @@ -447,14 +690,24 @@ func (i *Iterator) Iterate() <-chan *uast.Node { // Dispose must be called once you've finished using the iterator or preventively // with `defer` to free the iterator resources. Failing to do so would produce // a memory leak. -func (i *Iterator) Dispose() { +// +// Deprecated: use Close +func (it *Iterator) Dispose() { + _ = it.Close() +} + +// Close must be called once you've finished using the iterator or preventively +// with `defer` to free the iterator resources. Failing to do so would produce +// a memory leak. +func (it *Iterator) Close() error { itMutex.Lock() defer itMutex.Unlock() - if i.iterPtr != 0 { - C.IteratorFree(i.iterPtr) - i.iterPtr = 0 + if it.iterPtr != nil { + C.UastIteratorFree(it.iterPtr) + it.iterPtr = nil } - i.finished = true - i.root = nil + it.finished = true + it.root = nil + return nil } diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/tools/bindings.h b/vendor/gopkg.in/bblfsh/client-go.v2/tools/bindings.h index 1bdbfcf..28a3c7e 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/tools/bindings.h +++ b/vendor/gopkg.in/bblfsh/client-go.v2/tools/bindings.h @@ -13,117 +13,114 @@ #include "libuast/uast.h" #endif -extern char* goGetInternalType(uintptr_t); -extern char* goGetToken(uintptr_t); -extern int goGetChildrenSize(uintptr_t); -extern uintptr_t goGetChild(uintptr_t, int); -extern int goGetRolesSize(uintptr_t); -extern uint16_t goGetRole(uintptr_t, int); -extern int goGetPropertiesSize(uintptr_t); -extern char* goGetPropertyKey(uintptr_t, int); -extern char* goGetPropertyValue(uintptr_t, int); -extern bool goHasStartOffset(uintptr_t); -extern uint32_t goGetStartOffset(uintptr_t); -extern bool goHasStartLine(uintptr_t); -extern uint32_t goGetStartLine(uintptr_t); -extern bool goHasStartCol(uintptr_t); -extern uint32_t goGetStartCol(uintptr_t); -extern bool goHasEndOffset(uintptr_t); -extern uint32_t goGetEndOffset(uintptr_t); -extern bool goHasEndLine(uintptr_t); -extern uint32_t goGetEndLine(uintptr_t); -extern bool goHasEndCol(uintptr_t); -extern uint32_t goGetEndCol(uintptr_t); +extern char* goGetInternalType(Uast*, NodeHandle); +extern char* goGetToken(Uast*, NodeHandle); +extern int goGetChildrenSize(Uast*, NodeHandle); +extern NodeHandle goGetChild(Uast*, NodeHandle, int); +extern int goGetRolesSize(Uast*, NodeHandle); +extern uint16_t goGetRole(Uast*, NodeHandle, int); +extern int goGetPropertiesSize(Uast*, NodeHandle); +extern char* goGetPropertyKey(Uast*, NodeHandle, int); +extern char* goGetPropertyValue(Uast*, NodeHandle, int); +extern bool goHasStartOffset(Uast*, NodeHandle); +extern uint32_t goGetStartOffset(Uast*, NodeHandle); +extern bool goHasStartLine(Uast*, NodeHandle); +extern uint32_t goGetStartLine(Uast*, NodeHandle); +extern bool goHasStartCol(Uast*, NodeHandle); +extern uint32_t goGetStartCol(Uast*, NodeHandle); +extern bool goHasEndOffset(Uast*, NodeHandle); +extern uint32_t goGetEndOffset(Uast*, NodeHandle); +extern bool goHasEndLine(Uast*, NodeHandle); +extern uint32_t goGetEndLine(Uast*, NodeHandle); +extern bool goHasEndCol(Uast*, NodeHandle); +extern uint32_t goGetEndCol(Uast*, NodeHandle); -static const char *InternalType(const void *node) { - return 
goGetInternalType((uintptr_t)node); +static const char *InternalType(const Uast* ctx, NodeHandle node) { + return goGetInternalType((Uast*)ctx, node); } -static const char *Token(const void *node) { - return goGetToken((uintptr_t)node); +static const char *Token(const Uast* ctx, NodeHandle node) { + return goGetToken((Uast*)ctx, node); } -static size_t ChildrenSize(const void *node) { - return goGetChildrenSize((uintptr_t)node); +static size_t ChildrenSize(const Uast* ctx, NodeHandle node) { + return goGetChildrenSize((Uast*)ctx, node); } -static void *ChildAt(const void *data, int index) { - return (void*)goGetChild((uintptr_t)data, index); +static NodeHandle ChildAt(const Uast* ctx, NodeHandle data, int index) { + return goGetChild((Uast*)ctx, data, index); } -static size_t RolesSize(const void *node) { - return goGetRolesSize((uintptr_t)node); +static size_t RolesSize(const Uast* ctx, NodeHandle node) { + return goGetRolesSize((Uast*)ctx, node); } -static uint16_t RoleAt(const void *node, int index) { - return goGetRole((uintptr_t)node, index); +static uint16_t RoleAt(const Uast* ctx, NodeHandle node, int index) { + return goGetRole((Uast*)ctx, node, index); } -static size_t PropertiesSize(const void *node) { - return goGetPropertiesSize((uintptr_t)node); +static size_t PropertiesSize(const Uast* ctx, NodeHandle node) { + return goGetPropertiesSize((Uast*)ctx, node); } -static const char *PropertyKeyAt(const void *node, int index) { - return goGetPropertyKey((uintptr_t)node, index); +static const char *PropertyKeyAt(const Uast* ctx, NodeHandle node, int index) { + return goGetPropertyKey((Uast*)ctx, node, index); } -static const char *PropertyValueAt(const void *node, int index) { - return goGetPropertyValue((uintptr_t)node, index); +static const char *PropertyValueAt(const Uast* ctx, NodeHandle node, int index) { + return goGetPropertyValue((Uast*)ctx, node, index); } -static bool HasStartOffset(const void *node) { - return goHasStartOffset((uintptr_t)node); +static bool HasStartOffset(const Uast* ctx, NodeHandle node) { + return goHasStartOffset((Uast*)ctx, node); } -static uint32_t StartOffset(const void *node) { - return goGetStartOffset((uintptr_t)node); +static uint32_t StartOffset(const Uast* ctx, NodeHandle node) { + return goGetStartOffset((Uast*)ctx, node); } -static bool HasStartLine(const void *node) { - return goHasStartLine((uintptr_t)node); +static bool HasStartLine(const Uast* ctx, NodeHandle node) { + return goHasStartLine((Uast*)ctx, node); } -static uint32_t StartLine(const void *node) { - return goGetStartLine((uintptr_t)node); +static uint32_t StartLine(const Uast* ctx, NodeHandle node) { + return goGetStartLine((Uast*)ctx, node); } -static bool HasStartCol(const void *node) { - return goHasStartCol((uintptr_t)node); +static bool HasStartCol(const Uast* ctx, NodeHandle node) { + return goHasStartCol((Uast*)ctx, node); } -static uint32_t StartCol(const void *node) { - return goGetStartCol((uintptr_t)node); +static uint32_t StartCol(const Uast* ctx, NodeHandle node) { + return goGetStartCol((Uast*)ctx, node); } -static bool HasEndOffset(const void *node) { - return goHasEndOffset((uintptr_t)node); +static bool HasEndOffset(const Uast* ctx, NodeHandle node) { + return goHasEndOffset((Uast*)ctx, node); } -static uint32_t EndOffset(const void *node) { - return goGetEndOffset((uintptr_t)node); +static uint32_t EndOffset(const Uast* ctx, NodeHandle node) { + return goGetEndOffset((Uast*)ctx, node); } -static bool HasEndLine(const void *node) { - return 
goHasEndLine((uintptr_t)node); +static bool HasEndLine(const Uast* ctx, NodeHandle node) { + return goHasEndLine((Uast*)ctx, node); } -static uint32_t EndLine(const void *node) { - return goGetEndLine((uintptr_t)node); +static uint32_t EndLine(const Uast* ctx, NodeHandle node) { + return goGetEndLine((Uast*)ctx, node); } -static bool HasEndCol(const void *node) { - return goHasEndCol((uintptr_t)node); +static bool HasEndCol(const Uast* ctx, NodeHandle node) { + return goHasEndCol((Uast*)ctx, node); } -static uint32_t EndCol(const void *node) { - return goGetEndCol((uintptr_t)node); +static uint32_t EndCol(const Uast* ctx, NodeHandle node) { + return goGetEndCol((Uast*)ctx, node); } -static Uast *ctx; -static Nodes *nodes; - -static void CreateUast() { - ctx = UastNew((NodeIface){ +static Uast* CreateUast() { + return UastNew((NodeIface){ .InternalType = InternalType, .Token = Token, .ChildrenSize = ChildrenSize, @@ -148,57 +145,4 @@ static void CreateUast() { }); } -static bool Filter(uintptr_t node_ptr, const char *query) { - nodes = UastFilter(ctx, (void*)node_ptr, query); - return nodes != NULL; -} - -static int FilterBool(uintptr_t node_ptr, const char *query) { - bool ok; - bool res = UastFilterBool(ctx, (void*)node_ptr, query, &ok); - if (!ok) { - return -1; - } - return (int)res; -} - -static double FilterNumber(uintptr_t node_ptr, const char *query, int *ok) { - bool c_ok; - double res = UastFilterNumber(ctx, (void*)node_ptr, query, &c_ok); - if (!c_ok) { - *ok = 0; - } else { - *ok = 1; - } - return res; -} - -static const char *FilterString(uintptr_t node_ptr, const char *query) { - return UastFilterString(ctx, (void*)node_ptr, query); -} - -static uintptr_t IteratorNew(uintptr_t node_ptr, int order) { - return (uintptr_t)UastIteratorNew(ctx, (void *)node_ptr, order); -} - -static uintptr_t IteratorNext(uintptr_t iter) { - return (uintptr_t)UastIteratorNext((void*)iter); -} - -static void IteratorFree(uintptr_t iter) { - UastIteratorFree((void*)iter); -} - -static char *Error() { - return LastError(); -} - -static int Size() { - return NodesSize(nodes); -} - -static uintptr_t At(int i) { - return (uintptr_t)NodeAt(nodes, i); -} - #endif // CLIENT_GO_BINDINGS_H_ diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/tools/node_iface.h b/vendor/gopkg.in/bblfsh/client-go.v2/tools/node_iface.h deleted file mode 100644 index 79a7fe8..0000000 --- a/vendor/gopkg.in/bblfsh/client-go.v2/tools/node_iface.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef LIBUAST_NODE_IFACE_H_ -#define LIBUAST_NODE_IFACE_H_ - -#include -#include -#include - -// This interface must be implemented to create a Uast context. 
-typedef struct NodeIface { - const char *(*InternalType)(const void *); - const char *(*Token)(const void *); - - // Children - size_t (*ChildrenSize)(const void *); - void *(*ChildAt)(const void *, int); - - // Roles - size_t (*RolesSize)(const void *); - uint16_t (*RoleAt)(const void *, int); - - // Properties - size_t (*PropertiesSize)(const void *); - const char *(*PropertyKeyAt)(const void *, int); - const char *(*PropertyValueAt)(const void *, int); - - // Postion - bool (*HasStartOffset)(const void *); - uint32_t (*StartOffset)(const void *); - bool (*HasStartLine)(const void *); - uint32_t (*StartLine)(const void *); - bool (*HasStartCol)(const void *); - uint32_t (*StartCol)(const void *); - - bool (*HasEndOffset)(const void *); - uint32_t (*EndOffset)(const void *); - bool (*HasEndLine)(const void *); - uint32_t (*EndLine)(const void *); - bool (*HasEndCol)(const void *); - uint32_t (*EndCol)(const void *); - -} NodeIface; - -#endif // LIBUAST_NODE_IFACE_H_ diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/tools/nodes.h b/vendor/gopkg.in/bblfsh/client-go.v2/tools/nodes.h deleted file mode 100644 index 7d6ed3e..0000000 --- a/vendor/gopkg.in/bblfsh/client-go.v2/tools/nodes.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef LIBUAST_NODES_H_ -#define LIBUAST_NODES_H_ - -#include "export.h" - -typedef struct Nodes Nodes; - -// Returns the amount of nodes -EXPORT int NodesSize(const Nodes *nodes); - -// Returns the node at the given index. -EXPORT void *NodeAt(const Nodes *nodes, int index); - -// Releases the resources associated with nodes -EXPORT void NodesFree(Nodes *nodes); - -#endif // LIBUAST_NODES_H_ diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast.cc b/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast.cc index 07e9713..57e6ed8 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast.cc +++ b/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast.cc @@ -31,14 +31,14 @@ struct Uast { struct UastIterator { const Uast *ctx; TreeOrder order; - std::deque pending; - std::set visited; - void* (*nodeTransform)(void*); + std::deque pending; + std::set visited; + NodeHandle (*nodeTransform)(NodeHandle); bool preloaded; }; struct Nodes { - std::vector results; + std::vector results; int len; int cap; }; @@ -56,22 +56,22 @@ const std::vector Type2Str = { "XSLT_TREE" }; -static xmlDocPtr CreateDocument(const Uast *ctx, void *node); -static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, xmlNodePtr parent); +static xmlDocPtr CreateDocument(const Uast *ctx, NodeHandle node); +static xmlNodePtr CreateXmlNode(const Uast *ctx, NodeHandle node, xmlNodePtr parent); void Error(void *ctx, const char *msg, ...); // Adds the children of the node to the iterator queue and returns // if the node was already checked, which will happen with leaf nodes // or nodes which childs already processed. Used for the POST_ORDER // iterative traversal algorithm. -static bool Visited(UastIterator *iter, void *node); +static bool Visited(UastIterator *iter, NodeHandle node); // Get the next element in pre-order traversal mode. -static void *PreOrderNext(UastIterator *iter); +static NodeHandle PreOrderNext(UastIterator *iter); // Get the next element in level-order traversal mode. -static void *LevelOrderNext(UastIterator *iter); +static NodeHandle LevelOrderNext(UastIterator *iter); // Get the next element in post-order traversal mode. -static void *PostOrderNext(UastIterator *iter); +static NodeHandle PostOrderNext(UastIterator *iter); // Get the next element in position-order traversal mode. 
-static void *PositionOrderNext(UastIterator *iter); +static NodeHandle PositionOrderNext(UastIterator *iter); class QueryResult { xmlXPathContextPtr xpathCtx; @@ -80,7 +80,7 @@ class QueryResult { public: xmlXPathObjectPtr xpathObj; - QueryResult(const Uast *ctx, void *node, const char *query, + QueryResult(const Uast *ctx, NodeHandle node, const char *query, xmlXPathObjectType expected) { assert(ctx); @@ -136,7 +136,7 @@ class CreateXMLNodeException: public std::runtime_error { CreateXMLNodeException(): std::runtime_error("") {} }; -static UastIterator *UastIteratorNewBase(const Uast *ctx, void *node, TreeOrder order) { +static UastIterator *UastIteratorNewBase(const Uast *ctx, NodeHandle node, TreeOrder order) { assert(ctx); assert(node); @@ -172,13 +172,13 @@ int NodesSize(const Nodes *nodes) { return nodes->len; } -void *NodeAt(const Nodes *nodes, int index) { +NodeHandle NodeAt(const Nodes *nodes, int index) { assert(nodes); if (index < nodes->len) { return nodes->results[index]; } - return nullptr; + return 0; } Uast *UastNew(NodeIface iface) { @@ -209,7 +209,7 @@ void UastFree(Uast *ctx) { xmlCleanupParser(); } -UastIterator *UastIteratorNew(const Uast *ctx, void *node, TreeOrder order) { +UastIterator *UastIteratorNew(const Uast *ctx, NodeHandle node, TreeOrder order) { assert(ctx); assert(node); @@ -226,8 +226,8 @@ void UastIteratorFree(UastIterator *iter) { } } -UastIterator *UastIteratorNewWithTransformer(const Uast *ctx, void *node, - TreeOrder order, void*(*transform)(void*)) { +UastIterator *UastIteratorNewWithTransformer(const Uast *ctx, NodeHandle node, + TreeOrder order, NodeHandle(*transform)(NodeHandle)) { assert(ctx); assert(node); @@ -239,11 +239,11 @@ UastIterator *UastIteratorNewWithTransformer(const Uast *ctx, void *node, return iter; } -void *UastIteratorNext(UastIterator *iter) { +NodeHandle UastIteratorNext(UastIterator *iter) { assert(iter); if (iter == nullptr || iter->pending.empty()) { - return nullptr; + return 0; } switch(iter->order) { @@ -263,7 +263,7 @@ NodeIface UastGetIface(const Uast *ctx) { return ctx->iface; } -Nodes *UastFilter(const Uast *ctx, void *node, const char *query) { +Nodes *UastFilter(const Uast *ctx, NodeHandle node, const char *query) { assert(ctx); assert(node); assert(query); @@ -281,8 +281,11 @@ Nodes *UastFilter(const Uast *ctx, void *node, const char *query) { auto nodeset = queryResult.xpathObj->nodesetval; if (!nodeset) { - Error(nullptr, "Unable to get array of result nodes\n"); + if (NodesSetSize(nodes, 0) != 0) { + Error(nullptr, "Unable to set nodes size\n"); throw std::runtime_error(""); + } + return nodes; } auto results = nodeset->nodeTab; @@ -304,7 +307,7 @@ Nodes *UastFilter(const Uast *ctx, void *node, const char *query) { size_t nodeIdx = 0; for (int i = 0; i < size; i++) { if (results[i] != nullptr && results[i]->_private != nullptr) { - nodes->results[nodeIdx++] = results[i]->_private; + nodes->results[nodeIdx++] = (NodeHandle)(results[i]->_private); } } @@ -316,7 +319,7 @@ Nodes *UastFilter(const Uast *ctx, void *node, const char *query) { return nullptr; } -bool UastFilterBool(const Uast *ctx, void *node, const char *query, +bool UastFilterBool(const Uast *ctx, NodeHandle node, const char *query, bool *ok) { assert(ctx); assert(node); @@ -332,7 +335,7 @@ bool UastFilterBool(const Uast *ctx, void *node, const char *query, return false; } -double UastFilterNumber(const Uast *ctx, void *node, const char *query, +double UastFilterNumber(const Uast *ctx, NodeHandle node, const char *query, bool *ok) { assert(ctx); 
assert(node); @@ -348,7 +351,7 @@ double UastFilterNumber(const Uast *ctx, void *node, const char *query, return 0; } -const char *UastFilterString(const Uast *ctx, void *node, const char *query) { +const char *UastFilterString(const Uast *ctx, NodeHandle node, const char *query) { assert(ctx); assert(node); assert(query); @@ -393,14 +396,14 @@ int NodesCap(const Nodes *nodes) { return nodes->cap; } -static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, +static xmlNodePtr CreateXmlNode(const Uast *ctx, NodeHandle node, xmlNodePtr parent) { assert(ctx); assert(node); char buf[BUF_SIZE]; - const char *internal_type = ctx->iface.InternalType(node); + const char *internal_type = ctx->iface.InternalType(ctx, node); xmlNodePtr xmlNode = static_cast(xmlNewNode(nullptr, BAD_CAST(internal_type))); int children_size = 0; int roles_size = 0; @@ -411,7 +414,7 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, throw CreateXMLNodeException(); } - xmlNode->_private = node; + xmlNode->_private = (void*)node; if (parent) { if (!xmlAddChild(parent, xmlNode)) { throw CreateXMLNodeException(); @@ -419,7 +422,7 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, } // Token - token = ctx->iface.Token(node); + token = ctx->iface.Token(ctx, node); if (token) { if (!xmlNewProp(xmlNode, BAD_CAST("token"), BAD_CAST(token))) { throw CreateXMLNodeException(); @@ -427,9 +430,9 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, } // Roles - roles_size = ctx->iface.RolesSize(node); + roles_size = ctx->iface.RolesSize(ctx, node); for (int i = 0; i < roles_size; i++) { - uint16_t role = ctx->iface.RoleAt(node, i); + uint16_t role = ctx->iface.RoleAt(ctx, node, i); const char *role_name = RoleNameForId(role); if (role_name != nullptr) { if (!xmlNewProp(xmlNode, BAD_CAST(role_name), nullptr)) { @@ -439,17 +442,17 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, } // Properties - for (size_t i = 0; i < ctx->iface.PropertiesSize(node); i++) { - const char *key = ctx->iface.PropertyKeyAt(node, i); - const char *value = ctx->iface.PropertyValueAt(node, i); + for (size_t i = 0; i < ctx->iface.PropertiesSize(ctx, node); i++) { + const char *key = ctx->iface.PropertyKeyAt(ctx, node, i); + const char *value = ctx->iface.PropertyValueAt(ctx, node, i); if (!xmlNewProp(xmlNode, BAD_CAST(key), BAD_CAST(value))) { throw CreateXMLNodeException(); } } // Position - if (ctx->iface.HasStartOffset(node)) { - int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.StartOffset(node)); + if (ctx->iface.HasStartOffset(ctx, node)) { + int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.StartOffset(ctx, node)); if (ret < 0 || ret >= BUF_SIZE) { throw CreateXMLNodeException("Unable to set start offset\n"); } @@ -457,8 +460,8 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, throw CreateXMLNodeException(); } } - if (ctx->iface.HasStartLine(node)) { - int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.StartLine(node)); + if (ctx->iface.HasStartLine(ctx, node)) { + int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.StartLine(ctx, node)); if (ret < 0 || ret >= BUF_SIZE) { throw CreateXMLNodeException("Unable to start line\n"); } @@ -466,8 +469,8 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, throw CreateXMLNodeException(); } } - if (ctx->iface.HasStartCol(node)) { - int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.StartCol(node)); + if (ctx->iface.HasStartCol(ctx, node)) { + int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, 
ctx->iface.StartCol(ctx, node)); if (ret < 0 || ret >= BUF_SIZE) { throw CreateXMLNodeException("Unable to start column\n"); } @@ -475,8 +478,8 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, throw CreateXMLNodeException(); } } - if (ctx->iface.HasEndOffset(node)) { - int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.EndOffset(node)); + if (ctx->iface.HasEndOffset(ctx, node)) { + int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.EndOffset(ctx, node)); if (ret < 0 || ret >= BUF_SIZE) { throw CreateXMLNodeException("Unable to set end offset\n"); } @@ -484,8 +487,8 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, throw CreateXMLNodeException(); } } - if (ctx->iface.HasEndLine(node)) { - int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.EndLine(node)); + if (ctx->iface.HasEndLine(ctx, node)) { + int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.EndLine(ctx, node)); if (ret < 0 || ret >= BUF_SIZE) { Error(nullptr, "Unable to set end line\n"); throw CreateXMLNodeException(); @@ -494,8 +497,8 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, throw CreateXMLNodeException(); } } - if (ctx->iface.HasEndCol(node)) { - int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.EndCol(node)); + if (ctx->iface.HasEndCol(ctx, node)) { + int ret = snprintf(buf, BUF_SIZE, "%" PRIu32, ctx->iface.EndCol(ctx, node)); if (ret < 0 || ret >= BUF_SIZE) { throw CreateXMLNodeException("Unable to set end column\n"); } @@ -505,9 +508,9 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, } // Recursivelly visit all children - children_size = ctx->iface.ChildrenSize(node); + children_size = ctx->iface.ChildrenSize(ctx, node); for (int i = 0; i < children_size; i++) { - void *child = ctx->iface.ChildAt(node, i); + NodeHandle child = ctx->iface.ChildAt(ctx, node, i); if (!CreateXmlNode(ctx, child, xmlNode)) { throw CreateXMLNodeException(); } @@ -520,7 +523,7 @@ static xmlNodePtr CreateXmlNode(const Uast *ctx, void *node, return nullptr; } -static xmlDocPtr CreateDocument(const Uast *ctx, void *node) { +static xmlDocPtr CreateDocument(const Uast *ctx, NodeHandle node) { assert(ctx); assert(node); @@ -545,22 +548,22 @@ void Error(void *ctx, const char *msg, ...) { va_end(arg_ptr); } -static void *transformChildAt(UastIterator *iter, void *parent, size_t pos) { +static NodeHandle transformChildAt(UastIterator *iter, NodeHandle parent, size_t pos) { assert(iter); assert(parent); - auto child = iter->ctx->iface.ChildAt(parent, pos); + auto child = iter->ctx->iface.ChildAt(iter->ctx, parent, pos); return iter->nodeTransform ? 
iter->nodeTransform(child): child; } -static bool Visited(UastIterator *iter, void *node) { +static bool Visited(UastIterator *iter, NodeHandle node) { assert(iter); assert(node); const bool visited = iter->visited.find(node) != iter->visited.end(); if(!visited) { - int children_size = iter->ctx->iface.ChildrenSize(node); + int children_size = iter->ctx->iface.ChildrenSize(iter->ctx, node); for (int i = children_size - 1; i >= 0; i--) { iter->pending.push_front(transformChildAt(iter, node, i)); } @@ -570,17 +573,17 @@ static bool Visited(UastIterator *iter, void *node) { return visited; } -static void *PreOrderNext(UastIterator *iter) { +static NodeHandle PreOrderNext(UastIterator *iter) { assert(iter); - void *retNode = iter->pending.front(); + NodeHandle retNode = iter->pending.front(); iter->pending.pop_front(); - if (retNode == nullptr) { - return nullptr; + if (retNode == 0) { + return 0; } - int children_size = iter->ctx->iface.ChildrenSize(retNode); + int children_size = iter->ctx->iface.ChildrenSize(iter->ctx, retNode); for (int i = children_size - 1; i >= 0; i--) { iter->pending.push_front(transformChildAt(iter, retNode, i)); } @@ -588,16 +591,16 @@ static void *PreOrderNext(UastIterator *iter) { return retNode; } -static void *LevelOrderNext(UastIterator *iter) { +static NodeHandle LevelOrderNext(UastIterator *iter) { assert(iter); - void *retNode = iter->pending.front(); + NodeHandle retNode = iter->pending.front(); - if (retNode == nullptr) { - return nullptr; + if (retNode == 0) { + return 0; } - int children_size = iter->ctx->iface.ChildrenSize(retNode); + int children_size = iter->ctx->iface.ChildrenSize(iter->ctx, retNode); for (int i = 0; i < children_size; i++) { iter->pending.push_back(transformChildAt(iter, retNode, i)); } @@ -606,12 +609,12 @@ static void *LevelOrderNext(UastIterator *iter) { return retNode; } -static void *PostOrderNext(UastIterator *iter) { +static NodeHandle PostOrderNext(UastIterator *iter) { assert(iter); - void *curNode = iter->pending.front(); - if (curNode == nullptr) { - return nullptr; + NodeHandle curNode = iter->pending.front(); + if (curNode == 0) { + return 0; } while(!Visited(iter, curNode)) { @@ -628,23 +631,23 @@ static void sortPendingByPosition(UastIterator *iter) { iter->pending.pop_front(); UastIterator *subiter = UastIteratorNew(iter->ctx, root, PRE_ORDER); - void *curNode = nullptr; - while ((curNode = UastIteratorNext(subiter)) != nullptr) { + NodeHandle curNode = 0; + while ((curNode = UastIteratorNext(subiter)) != 0) { iter->pending.push_back(curNode); } UastIteratorFree(subiter); - std::sort(iter->pending.begin(), iter->pending.end(), [&iter](void *i, void *j) { + std::sort(iter->pending.begin(), iter->pending.end(), [&iter](NodeHandle i, NodeHandle j) { auto ic = iter->ctx->iface; - if (ic.HasStartOffset(i) && ic.HasStartOffset(j)) { - return ic.StartOffset(i) < ic.StartOffset(j); + if (ic.HasStartOffset(iter->ctx, i) && ic.HasStartOffset(iter->ctx, j)) { + return ic.StartOffset(iter->ctx, i) < ic.StartOffset(iter->ctx, j); } // Continue: some didn't have offset, check by line/col - auto firstLine = ic.HasStartLine(i) ? ic.StartLine(i) : 0; - auto firstCol = ic.HasStartCol(i) ? ic.StartCol(i) : 0; - auto secondLine = ic.HasStartLine(j) ? ic.StartLine(j) : 0; - auto secondCol = ic.HasStartCol(j) ? ic.StartCol(j) : 0; + auto firstLine = ic.HasStartLine(iter->ctx, i) ? ic.StartLine(iter->ctx, i) : 0; + auto firstCol = ic.HasStartCol(iter->ctx, i) ? 
ic.StartCol(iter->ctx, i) : 0; + auto secondLine = ic.HasStartLine(iter->ctx, j) ? ic.StartLine(iter->ctx, j) : 0; + auto secondCol = ic.HasStartCol(iter->ctx, j) ? ic.StartCol(iter->ctx, j) : 0; if (firstLine == secondLine) { return firstCol < secondCol; @@ -654,7 +657,7 @@ static void sortPendingByPosition(UastIterator *iter) { }); } -static void *PositionOrderNext(UastIterator *iter) { +static NodeHandle PositionOrderNext(UastIterator *iter) { assert(iter); if (!iter->preloaded) { @@ -663,9 +666,9 @@ static void *PositionOrderNext(UastIterator *iter) { iter->preloaded = true; } - void *retNode = iter->pending.front(); - if (retNode == nullptr) { - return nullptr; + NodeHandle retNode = iter->pending.front(); + if (retNode == 0) { + return 0; } iter->pending.pop_front(); diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast.h b/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast.h index ba872ae..976d063 100644 --- a/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast.h +++ b/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast.h @@ -1,20 +1,70 @@ #ifndef LIBUAST_UAST_H_ #define LIBUAST_UAST_H_ +#include "export.h" + +#include +#include +#include + #ifdef __cplusplus extern "C" { #endif -#include "export.h" -#include "node_iface.h" -#include "nodes.h" - // Uast stores the general context required for library functions. // It must be initialized with `UastNew` passing a valid implementation of the // `NodeIface` interface. // Once it is not used anymore, it shall be released calling `UastFree`. typedef struct Uast Uast; +typedef uintptr_t NodeHandle; + +// This interface must be implemented to create a Uast context. +typedef struct NodeIface { + const char *(*InternalType)(const Uast*, NodeHandle); + const char *(*Token)(const Uast*, NodeHandle); + + // Children + size_t (*ChildrenSize)(const Uast*, NodeHandle); + NodeHandle (*ChildAt)(const Uast*, NodeHandle, int); + + // Roles + size_t (*RolesSize)(const Uast*, NodeHandle); + uint16_t (*RoleAt)(const Uast*, NodeHandle, int); + + // Properties + size_t (*PropertiesSize)(const Uast*, NodeHandle); + const char *(*PropertyKeyAt)(const Uast*, NodeHandle, int); + const char *(*PropertyValueAt)(const Uast*, NodeHandle, int); + + // Postion + bool (*HasStartOffset)(const Uast*, NodeHandle); + uint32_t (*StartOffset)(const Uast*, NodeHandle); + bool (*HasStartLine)(const Uast*, NodeHandle); + uint32_t (*StartLine)(const Uast*, NodeHandle); + bool (*HasStartCol)(const Uast*, NodeHandle); + uint32_t (*StartCol)(const Uast*, NodeHandle); + + bool (*HasEndOffset)(const Uast*, NodeHandle); + uint32_t (*EndOffset)(const Uast*, NodeHandle); + bool (*HasEndLine)(const Uast*, NodeHandle); + uint32_t (*EndLine)(const Uast*, NodeHandle); + bool (*HasEndCol)(const Uast*, NodeHandle); + uint32_t (*EndCol)(const Uast*, NodeHandle); + +} NodeIface; + +typedef struct Nodes Nodes; + +// Returns the amount of nodes +EXPORT int NodesSize(const Nodes *nodes); + +// Returns the node at the given index. +EXPORT NodeHandle NodeAt(const Nodes *nodes, int index); + +// Releases the resources associated with nodes +EXPORT void NodesFree(Nodes *nodes); + // An UastIterator is used to keep the state of the current iteration over the tree. // It's initialized with UastIteratorNew, used with UastIteratorNext and freed // with UastIteratorFree. @@ -58,23 +108,23 @@ EXPORT void UastFree(Uast *ctx); // It will return an error if the query has a return type that is not a // node list. 
In that case, you should use one of the typed filter functions // (`UastFilterBool`, `UastFilterNumber` or `UastFilterString`). -EXPORT Nodes *UastFilter(const Uast *ctx, void *node, const char *query); +EXPORT Nodes *UastFilter(const Uast *ctx, NodeHandle node, const char *query); // Returns a integer value as result of executing the XPath query with bool result, // with `1` meaning `true` and `0` false. If there is any error, the flag `ok` will // be set to false. The parameters have the same meaning as `UastFilter`. -EXPORT bool UastFilterBool(const Uast *ctx, void *node, const char *query, bool *ok); +EXPORT bool UastFilterBool(const Uast *ctx, NodeHandle node, const char *query, bool *ok); // Returns a `double` value as result of executing the XPath query with number result. // The parameters have the same meaning as `UastFilter`. If there is any error, // the flag `ok` will be set to false. -EXPORT double UastFilterNumber(const Uast *ctx, void *node, const char *query, bool *ok); +EXPORT double UastFilterNumber(const Uast *ctx, NodeHandle node, const char *query, bool *ok); // Returns a `const char*` value as result of executing the XPath query with // a string result. The parameters have the same meaning as `UastFilter`. The user // takes ownership of the returned `const char *` and thus must free it. // If there is any error, the return value will be `NULL`. -EXPORT const char *UastFilterString(const Uast *ctx, void *node, const char *query); +EXPORT const char *UastFilterString(const Uast *ctx, NodeHandle node, const char *query); // Create a new UastIterator pointer. This will allow you to traverse the UAST // calling UastIteratorNext. The node argument will be user as the root node of @@ -83,21 +133,21 @@ EXPORT const char *UastFilterString(const Uast *ctx, void *node, const char *que // be frees using UastIteratorFree. // // Returns NULL and sets LastError if the UastIterator couldn't initialize. -EXPORT UastIterator *UastIteratorNew(const Uast *ctx, void *node, TreeOrder order); +EXPORT UastIterator *UastIteratorNew(const Uast *ctx, NodeHandle node, TreeOrder order); // Same as UastIteratorNew, but also allows to specify a transform function taking a node // and returning it. This is specially useful when the bindings need to do operations like // increasing / decreasing the language reference count when new nodes are added to the // iterator internal data structures. -UastIterator *UastIteratorNewWithTransformer(const Uast *ctx, void *node, - TreeOrder order, void*(*transform)(void*)); +UastIterator *UastIteratorNewWithTransformer(const Uast *ctx, NodeHandle node, + TreeOrder order, NodeHandle(*transform)(NodeHandle)); // Frees a UastIterator. EXPORT void UastIteratorFree(UastIterator *iter); // Retrieve the next node of the traversal of an UAST tree or NULL if the // traversal has finished. -EXPORT void *UastIteratorNext(UastIterator *iter); +EXPORT NodeHandle UastIteratorNext(UastIterator *iter); // Returns a string with the latest error. // It may be an empty string if there's been no error. 
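For reference, here is a minimal sketch (not part of the patch) of how a caller might use the NodeHandle-based API declared in uast.h above. It assumes a `Uast *ctx` that was already created with UastNew from a NodeIface implementation and a `root` handle obtained from the language binding; the XPath queries are illustrative only. Note that NodeHandle is an integer handle (uintptr_t), so "no node" is signalled by 0 rather than by a NULL pointer, as the updated iterator code above suggests.

#include <stdio.h>
#include <stdbool.h>

#include "uast.h"

static void filter_and_walk(const Uast *ctx, NodeHandle root) {
  // XPath filter: the returned node list must be released with NodesFree.
  Nodes *results = UastFilter(ctx, root, "//Identifier");
  if (results) {
    int n = NodesSize(results);
    printf("query matched %d nodes\n", n);
    for (int i = 0; i < n; i++) {
      NodeHandle nd = NodeAt(results, i);
      printf("node handle: %lu\n", (unsigned long)nd);
    }
    NodesFree(results);
  }

  // Typed filter: boolean XPath expressions go through UastFilterBool.
  bool ok = false;
  bool has_ids = UastFilterBool(ctx, root, "boolean(//Identifier)", &ok);
  if (ok) {
    printf("has identifiers: %d\n", (int)has_ids);
  }

  // Iteration: UastIteratorNext returns handles until the traversal is
  // exhausted, at which point it returns 0.
  UastIterator *iter = UastIteratorNew(ctx, root, PRE_ORDER);
  if (iter) {
    NodeHandle nd;
    while ((nd = UastIteratorNext(iter)) != 0) {
      // Visit nd through the NodeIface callbacks held by ctx.
    }
    UastIteratorFree(iter);
  }
}

The same pattern applies to UastFilterNumber and UastFilterString; for the latter, the caller owns the returned string and must free it, as noted in the header comments.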
diff --git a/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast_v2.go b/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast_v2.go new file mode 100644 index 0000000..e025d45 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/client-go.v2/tools/uast_v2.go @@ -0,0 +1,18 @@ +package tools + +import ( + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/query" + "gopkg.in/bblfsh/sdk.v2/uast/query/xpath" +) + +// FilterXPath takes a `Node` as returned by UAST() call and an xpath query and filters the tree, +// returning the iterator of nodes that satisfy the given query. +func (c *Context) FilterXPath(node nodes.External, query string) (query.Iterator, error) { + return FilterXPath(node, query) +} + +// FilterXPath is a shorthand for Context.FilterXPath. +func FilterXPath(node nodes.External, query string) (query.Iterator, error) { + return xpath.New().Execute(node, query) +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/LICENSE b/vendor/gopkg.in/bblfsh/sdk.v2/LICENSE new file mode 100644 index 0000000..9cecc1d --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. 
+ + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/driver/driver.go b/vendor/gopkg.in/bblfsh/sdk.v2/driver/driver.go new file mode 100644 index 0000000..3e5ab66 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/driver/driver.go @@ -0,0 +1,80 @@ +// Package driver contains all the logic to build a driver. +package driver + +import ( + "bytes" + "context" + + "gopkg.in/bblfsh/sdk.v2/driver/manifest" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" +) + +type Mode int + +const ( + ModeNative = Mode(1 << iota) + ModePreprocessed + ModeAnnotated + ModeSemantic +) + +const ModeDefault = ModeSemantic + +// Module is an interface for a generic module instance. +type Module interface { + Start() error + Close() error +} + +// Driver is an interface for a language driver that returns UAST. +type Driver interface { + Parse(ctx context.Context, mode Mode, src string) (nodes.Node, error) +} + +// DriverModule is an interface for a driver instance. 
+type DriverModule interface { + Module + Driver + Manifest() (manifest.Manifest, error) +} + +// Native is a base interface of a language driver that returns a native AST. +type Native interface { + Module + Parse(ctx context.Context, src string) (nodes.Node, error) +} + +// ErrMulti joins multiple errors. +type ErrMulti struct { + Header string + Errors []string +} + +func (e ErrMulti) Error() string { + buf := bytes.NewBuffer(nil) + if e.Header != "" { + buf.WriteString(e.Header + ":\n") + } + for _, s := range e.Errors { + buf.WriteString(s) + buf.WriteString("\n") + } + return buf.String() +} + +func MultiError(errs []string) error { + return &ErrMulti{Errors: errs} +} + +func PartialParse(ast nodes.Node, errs []string) error { + return &ErrPartialParse{ + ErrMulti: ErrMulti{Header: "partial parse", Errors: errs}, + AST: ast, + } +} + +// ErrPartialParse is returned when the driver was not able to parse the whole source file. +type ErrPartialParse struct { + ErrMulti + AST nodes.Node +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/driver/impl.go b/vendor/gopkg.in/bblfsh/sdk.v2/driver/impl.go new file mode 100644 index 0000000..4362982 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/driver/impl.go @@ -0,0 +1,57 @@ +package driver + +import ( + "context" + "fmt" + + "gopkg.in/bblfsh/sdk.v2/driver/manifest" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" +) + +// NewDriverFrom returns a new DriverModule based on the given native driver, manifest and set of transformations. +func NewDriverFrom(d Native, m *manifest.Manifest, t Transforms) (DriverModule, error) { + if d == nil { + return nil, fmt.Errorf("no driver implementation") + } else if m == nil { + return nil, fmt.Errorf("no manifest") + } + return &driverImpl{d: d, m: m, t: t}, nil +} + +// driverImpl implements a bblfsh driver; a driver is in charge of transforming +// source code into an AST and a UAST. To transform the AST into a UAST, a +// `uast.ObjectToNode` and a series of `transformer.Transformer` are used. +// +// The `Parse` and `NativeParse` requests block the driver until the request is +// done, since the communication with the native driver is a single-channel +// synchronous communication over stdin/stdout. +type driverImpl struct { + d Native + + m *manifest.Manifest + t Transforms +} + +func (d *driverImpl) Start() error { + return d.d.Start() +} + +func (d *driverImpl) Close() error { + return d.d.Close() +} + +// Parse processes a protocol.ParseRequest by calling the native driver: a +// parse request is sent to the internal native driver and the returned +// native AST is transformed into a UAST. +func (d *driverImpl) Parse(ctx context.Context, mode Mode, src string) (nodes.Node, error) { + ast, err := d.d.Parse(ctx, src) + if err != nil { + return nil, err + } + return d.t.Do(mode, src, ast) +} + +// Manifest returns a driver manifest.
+func (d *driverImpl) Manifest() (manifest.Manifest, error) { + return *d.m, nil // TODO: clone +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/driver/manifest/manifest.go b/vendor/gopkg.in/bblfsh/sdk.v2/driver/manifest/manifest.go new file mode 100644 index 0000000..13cceaf --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/driver/manifest/manifest.go @@ -0,0 +1,152 @@ +package manifest + +import ( + "io" + "os" + "strings" + "time" + + "github.com/BurntSushi/toml" +) + +const Filename = "manifest.toml" + +type DevelopmentStatus string + +const ( + Planning DevelopmentStatus = "planning" + PreAlpha DevelopmentStatus = "pre-alpha" + Alpha DevelopmentStatus = "alpha" + Beta DevelopmentStatus = "beta" + Stable DevelopmentStatus = "stable" + Mature DevelopmentStatus = "mature" + Inactive DevelopmentStatus = "inactive" +) + +// Rank is an integer indicating driver stability. Higher is better. +func (s DevelopmentStatus) Rank() int { + // TODO: make DevelopmentStatus an int enum and provide text marshal/unmarshal methods + return statusRanks[s] +} + +var statusRanks = map[DevelopmentStatus]int{ + Inactive: 0, + Planning: 1, + PreAlpha: 2, + Alpha: 3, + Beta: 4, + Stable: 5, + Mature: 6, +} + +// InformationLoss in terms of which kind of code generation would they allow. +type InformationLoss string + +const ( + // Lossless no information loss converting code to AST and then back to code + // would. code == codegen(AST(code)). + Lossless InformationLoss = "lossless" + // FormatingLoss only superfluous formatting information is lost (e.g. + // whitespace, indentation). Code generated from the AST could be the same + // as the original code after passing a code formatter. + // fmt(code) == codegen(AST(code)). + FormatingLoss InformationLoss = "formating-loss" + // SyntacticSugarLoss there is information loss about syntactic sugar. Code + // generated from the AST could be the same as the original code after + // desugaring it. desugar(code) == codegen(AST(code)). + SyntacticSugarLoss InformationLoss = "syntactic-sugar-loss" + // CommentLoss comments are not present in the AST. + CommentLoss InformationLoss = "formating-loss" +) + +// Feature describes which level of information driver can produce. +type Feature string + +const ( + // AST is a basic feature required for the driver. Driver can parse files and return native language AST. + AST Feature = "ast" + // UAST feature indicates that driver properly converts AST to UAST without further annotating it. + UAST Feature = "uast" + // Roles feature indicates that driver annotates UAST with roles. All node types are annotated. 
+ Roles Feature = "roles" +) + +type OS string + +const ( + Alpine OS = "alpine" + Debian OS = "debian" +) + +func (os OS) AsImage() string { + switch os { + case Alpine: + return "alpine:3.7" + case Debian: + return "debian:jessie-slim" + default: + return "" + } +} + +type Manifest struct { + Name string `toml:"name"` // human-readable name + Language string `toml:"language"` + Version string `toml:"version,omitempty"` + Build *time.Time `toml:"build,omitempty"` + Status DevelopmentStatus `toml:"status"` + InformationLoss []InformationLoss `toml:"loss"` + Documentation struct { + Description string `toml:"description,omitempty"` + Caveats string `toml:"caveats,omitempty"` + } `toml:"documentation,omitempty"` + Runtime struct { + OS OS `toml:"os"` + NativeVersion Versions `toml:"native_version"` + NativeEncoding string `toml:"native_encoding"` + GoVersion string `toml:"go_version"` + } `toml:"runtime"` + Features []Feature `toml:"features"` +} + +// Supports checks if driver supports specified feature. +func (m Manifest) Supports(f Feature) bool { + for _, f2 := range m.Features { + if f == f2 { + return true + } + } + return false +} + +type Versions []string + +func (v Versions) String() string { + return strings.Join(v, ":") +} + +// Load reads a manifest and decode the content into a new Manifest struct +func Load(path string) (*Manifest, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + + m := &Manifest{} + return m, m.Decode(f) +} + +// Encode encodes m in toml format and writes the restult to w +func (m *Manifest) Encode(w io.Writer) error { + e := toml.NewEncoder(w) + return e.Encode(m) +} + +// Decode decodes reads r and decodes it into m +func (m *Manifest) Decode(r io.Reader) error { + if _, err := toml.DecodeReader(r, m); err != nil { + return err + } + + return nil +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/driver/transforms.go b/vendor/gopkg.in/bblfsh/sdk.v2/driver/transforms.go new file mode 100644 index 0000000..80b048e --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/driver/transforms.go @@ -0,0 +1,74 @@ +package driver + +import ( + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/transformer" +) + +// Transforms describes a set of AST transformations this driver requires. +type Transforms struct { + // Namespace for this language driver + Namespace string + // Preprocess transforms normalizes native AST. + // It usually includes: + // * changing type key to uast.KeyType + // * changing token key to uast.KeyToken + // * restructure positional information + Preprocess []transformer.Transformer + // Normalize converts known AST structures to high-level AST representation (UAST). + Normalize []transformer.Transformer + // Annotations transforms annotates the tree with roles. + Annotations []transformer.Transformer + // Code transforms are applied directly after Native and provide a way + // to extract more information from source files, fix positional info, etc. + Code []transformer.CodeTransformer +} + +// Do applies AST transformation pipeline for specified nodes. 
+func (t Transforms) Do(mode Mode, code string, nd nodes.Node) (nodes.Node, error) { + if mode == 0 { + mode = ModeDefault + } + if mode == ModeNative { + return nd, nil + } + var err error + + runAll := func(list []transformer.Transformer) error { + for _, t := range list { + nd, err = t.Do(nd) + if err != nil { + return err + } + } + return nil + } + if err := runAll(t.Preprocess); err != nil { + return nd, err + } + if mode >= ModeSemantic { + if err := runAll(t.Normalize); err != nil { + return nd, err + } + } + if mode >= ModeAnnotated { + if err := runAll(t.Annotations); err != nil { + return nd, err + } + } + + for _, ct := range t.Code { + t := ct.OnCode(code) + nd, err = t.Do(nd) + if err != nil { + return nd, err + } + } + if mode >= ModeSemantic && t.Namespace != "" { + nd, err = transformer.DefaultNamespace(t.Namespace).Do(nd) + if err != nil { + return nd, err + } + } + return nd, nil +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/etc/skeleton/LICENSE b/vendor/gopkg.in/bblfsh/sdk.v2/etc/skeleton/LICENSE new file mode 100644 index 0000000..9cecc1d --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/etc/skeleton/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/protocol/driver.go b/vendor/gopkg.in/bblfsh/sdk.v2/protocol/driver.go new file mode 100644 index 0000000..69993f5 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/protocol/driver.go @@ -0,0 +1,74 @@ +package protocol + +import ( + "bytes" + "context" + + xcontext "golang.org/x/net/context" + "google.golang.org/grpc" + "gopkg.in/bblfsh/sdk.v2/driver" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto" +) + +//go:generate protoc --proto_path=$GOPATH/src:. --gogo_out=plugins=grpc:. 
./driver.proto + +func RegisterDriver(srv *grpc.Server, d driver.Driver) { + RegisterDriverServer(srv, &driverServer{d: d}) +} + +func AsDriver(cc *grpc.ClientConn, lang string) driver.Driver { + return &client{c: NewDriverClient(cc), lang: lang} +} + +type driverServer struct { + d driver.Driver +} + +func (s *driverServer) Parse(ctx xcontext.Context, req *ParseRequest) (*ParseResponse, error) { + var resp ParseResponse + n, err := s.d.Parse(ctx, driver.Mode(req.Mode), req.Content) + if e, ok := err.(*driver.ErrPartialParse); ok { + n = e.AST + for _, txt := range e.Errors { + resp.Errors = append(resp.Errors, &ParseError{Text: txt}) + } + } else if err != nil { + return nil, err + } + buf := bytes.NewBuffer(nil) + err = nodesproto.WriteTo(buf, n) + if err != nil { + return nil, err + } + resp.Uast = buf.Bytes() + return &resp, nil +} + +type client struct { + c DriverClient + lang string +} + +func (c *client) Parse(ctx context.Context, mode driver.Mode, src string) (nodes.Node, error) { + resp, err := c.c.Parse(ctx, &ParseRequest{Content: src, Mode: Mode(mode), Language: c.lang}) + if err != nil { + return nil, err + } + return resp.Nodes() +} + +func (m *ParseResponse) Nodes() (nodes.Node, error) { + ast, err := nodesproto.ReadTree(bytes.NewReader(m.Uast)) + if err != nil { + return nil, err + } + if len(m.Errors) != 0 { + var errs []string + for _, e := range m.Errors { + errs = append(errs, e.Text) + } + return nil, &driver.ErrPartialParse{AST: ast, ErrMulti: driver.ErrMulti{Errors: errs}} + } + return ast, nil +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/protocol/driver.pb.go b/vendor/gopkg.in/bblfsh/sdk.v2/protocol/driver.pb.go new file mode 100644 index 0000000..f80f58b --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/protocol/driver.pb.go @@ -0,0 +1,891 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: driver.proto + +/* + Package protocol is a generated protocol buffer package. + + It is generated from these files: + driver.proto + + It has these top-level messages: + ParseRequest + ParseResponse + ParseError +*/ +package protocol + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Mode int32 + +const ( + // DefaultMode selects the transformation mode that is considered to produce UAST of the best quality. + Mode_DefaultMode Mode = 0 + // Native disables any UAST transformations and emits a native language AST as returned by the parser. + Mode_Native Mode = 1 + // Preprocessed runs only basic transformation over native AST (normalize positional info, type fields). + Mode_Preprocessed Mode = 2 + // Annotated UAST is based on native AST, but provides role annotations for nodes. + Mode_Annotated Mode = 4 + // Semantic UAST normalizes native AST nodes to a unified structure where possible. 
+ Mode_Semantic Mode = 8 +) + +var Mode_name = map[int32]string{ + 0: "DEFAULT_MODE", + 1: "NATIVE", + 2: "PREPROCESSED", + 4: "ANNOTATED", + 8: "SEMANTIC", +} +var Mode_value = map[string]int32{ + "DEFAULT_MODE": 0, + "NATIVE": 1, + "PREPROCESSED": 2, + "ANNOTATED": 4, + "SEMANTIC": 8, +} + +func (x Mode) String() string { + return proto.EnumName(Mode_name, int32(x)) +} +func (Mode) EnumDescriptor() ([]byte, []int) { return fileDescriptorDriver, []int{0} } + +// ParseRequest is a request to parse a file and get its UAST. +type ParseRequest struct { + // Content stores the content of a source file. Required. + Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` + // Language can be set optionally to disable automatic language detection. + Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"` + // Filename can be set optionally to assist automatic language detection. + Filename string `protobuf:"bytes,3,opt,name=filename,proto3" json:"filename,omitempty"` + // Mode sets a transformation pipeline used for UAST. + Mode Mode `protobuf:"varint,4,opt,name=mode,proto3,enum=gopkg.in.bblfsh.sdk.v2.protocol.Mode" json:"mode,omitempty"` +} + +func (m *ParseRequest) Reset() { *m = ParseRequest{} } +func (m *ParseRequest) String() string { return proto.CompactTextString(m) } +func (*ParseRequest) ProtoMessage() {} +func (*ParseRequest) Descriptor() ([]byte, []int) { return fileDescriptorDriver, []int{0} } + +// ParseResponse is the reply to ParseRequest. +type ParseResponse struct { + // UAST is a binary encoding of the resulting UAST. + Uast []byte `protobuf:"bytes,1,opt,name=uast,proto3" json:"uast,omitempty"` + // Language that was automatically detected. + Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"` + // Errors is a list of parsing errors. + // Only set if parser was able to return a response. Otherwise gRPC error codes are used. + Errors []*ParseError `protobuf:"bytes,3,rep,name=errors" json:"errors,omitempty"` +} + +func (m *ParseResponse) Reset() { *m = ParseResponse{} } +func (m *ParseResponse) String() string { return proto.CompactTextString(m) } +func (*ParseResponse) ProtoMessage() {} +func (*ParseResponse) Descriptor() ([]byte, []int) { return fileDescriptorDriver, []int{1} } + +type ParseError struct { + // Text is an error message. + Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"` +} + +func (m *ParseError) Reset() { *m = ParseError{} } +func (m *ParseError) String() string { return proto.CompactTextString(m) } +func (*ParseError) ProtoMessage() {} +func (*ParseError) Descriptor() ([]byte, []int) { return fileDescriptorDriver, []int{2} } + +func init() { + proto.RegisterType((*ParseRequest)(nil), "gopkg.in.bblfsh.sdk.v2.protocol.ParseRequest") + proto.RegisterType((*ParseResponse)(nil), "gopkg.in.bblfsh.sdk.v2.protocol.ParseResponse") + proto.RegisterType((*ParseError)(nil), "gopkg.in.bblfsh.sdk.v2.protocol.ParseError") + proto.RegisterEnum("gopkg.in.bblfsh.sdk.v2.protocol.Mode", Mode_name, Mode_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Driver service + +type DriverClient interface { + // Parse returns an UAST for a given source file. 
+ Parse(ctx context.Context, in *ParseRequest, opts ...grpc.CallOption) (*ParseResponse, error) +} + +type driverClient struct { + cc *grpc.ClientConn +} + +func NewDriverClient(cc *grpc.ClientConn) DriverClient { + return &driverClient{cc} +} + +func (c *driverClient) Parse(ctx context.Context, in *ParseRequest, opts ...grpc.CallOption) (*ParseResponse, error) { + out := new(ParseResponse) + err := grpc.Invoke(ctx, "/gopkg.in.bblfsh.sdk.v2.protocol.Driver/Parse", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Driver service + +type DriverServer interface { + // Parse returns an UAST for a given source file. + Parse(context.Context, *ParseRequest) (*ParseResponse, error) +} + +func RegisterDriverServer(s *grpc.Server, srv DriverServer) { + s.RegisterService(&_Driver_serviceDesc, srv) +} + +func _Driver_Parse_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ParseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DriverServer).Parse(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/gopkg.in.bblfsh.sdk.v2.protocol.Driver/Parse", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DriverServer).Parse(ctx, req.(*ParseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Driver_serviceDesc = grpc.ServiceDesc{ + ServiceName: "gopkg.in.bblfsh.sdk.v2.protocol.Driver", + HandlerType: (*DriverServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Parse", + Handler: _Driver_Parse_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "driver.proto", +} + +func (m *ParseRequest) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParseRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Content) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDriver(dAtA, i, uint64(len(m.Content))) + i += copy(dAtA[i:], m.Content) + } + if len(m.Language) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDriver(dAtA, i, uint64(len(m.Language))) + i += copy(dAtA[i:], m.Language) + } + if len(m.Filename) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintDriver(dAtA, i, uint64(len(m.Filename))) + i += copy(dAtA[i:], m.Filename) + } + if m.Mode != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintDriver(dAtA, i, uint64(m.Mode)) + } + return i, nil +} + +func (m *ParseResponse) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParseResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Uast) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDriver(dAtA, i, uint64(len(m.Uast))) + i += copy(dAtA[i:], m.Uast) + } + if len(m.Language) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDriver(dAtA, i, uint64(len(m.Language))) + i += copy(dAtA[i:], m.Language) + } + if len(m.Errors) > 0 { + for _, msg := range m.Errors { + dAtA[i] = 0x1a + i++ + i = encodeVarintDriver(dAtA, i, uint64(msg.ProtoSize())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ParseError) Marshal() (dAtA []byte, err 
error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParseError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Text) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDriver(dAtA, i, uint64(len(m.Text))) + i += copy(dAtA[i:], m.Text) + } + return i, nil +} + +func encodeVarintDriver(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ParseRequest) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Content) + if l > 0 { + n += 1 + l + sovDriver(uint64(l)) + } + l = len(m.Language) + if l > 0 { + n += 1 + l + sovDriver(uint64(l)) + } + l = len(m.Filename) + if l > 0 { + n += 1 + l + sovDriver(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovDriver(uint64(m.Mode)) + } + return n +} + +func (m *ParseResponse) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Uast) + if l > 0 { + n += 1 + l + sovDriver(uint64(l)) + } + l = len(m.Language) + if l > 0 { + n += 1 + l + sovDriver(uint64(l)) + } + if len(m.Errors) > 0 { + for _, e := range m.Errors { + l = e.ProtoSize() + n += 1 + l + sovDriver(uint64(l)) + } + } + return n +} + +func (m *ParseError) ProtoSize() (n int) { + var l int + _ = l + l = len(m.Text) + if l > 0 { + n += 1 + l + sovDriver(uint64(l)) + } + return n +} + +func sovDriver(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDriver(x uint64) (n int) { + return sovDriver(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ParseRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParseRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParseRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDriver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Content = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Language", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDriver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Language = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDriver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filename = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (Mode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDriver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDriver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uast", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDriver + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uast = append(m.Uast[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Uast == nil { + m.Uast = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Language", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDriver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Language = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDriver + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Errors = append(m.Errors, &ParseError{}) + if err := m.Errors[len(m.Errors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDriver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDriver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParseError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParseError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParseError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Text", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDriver + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDriver + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Text = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDriver(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDriver + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDriver(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDriver + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDriver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDriver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthDriver + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDriver + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDriver(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDriver = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDriver = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("driver.proto", fileDescriptorDriver) } + +var fileDescriptorDriver = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0xcd, 0x8e, 0x93, 0x50, + 0x14, 0xee, 0x9d, 0x56, 0x6c, 0xef, 0x30, 0x4a, 0xee, 0xc2, 0x10, 0x62, 0x10, 0x9b, 0x98, 0x34, + 0x9a, 0x61, 0x92, 0xba, 0x72, 0x89, 0x05, 0x93, 0x49, 0x2c, 0x6d, 0x28, 0xba, 0x70, 0x63, 0xf8, + 0x39, 0x30, 0x64, 0x28, 0x17, 0xb9, 0x97, 0xc6, 0x07, 0x70, 0xc5, 0x2b, 0x18, 0xa2, 0x8f, 0x33, + 0x4b, 0x1f, 0x41, 0xeb, 0x8b, 0x18, 0x2e, 0xad, 0xee, 0x9c, 0xd9, 0x7d, 0xdf, 0xfd, 0xce, 0x97, + 0xf3, 0x9d, 0x73, 0x0f, 0x96, 0xe3, 0x2a, 0xdb, 0x41, 0x65, 0x96, 0x15, 0xe5, 0x94, 0x3c, 0x49, + 0x69, 0x79, 0x9d, 0x9a, 0x59, 0x61, 0x86, 0x61, 0x9e, 0xb0, 0x2b, 0x93, 0xc5, 0xd7, 0xe6, 0x6e, + 0xde, 0xab, 0x11, 0xcd, 0xb5, 0xf3, 0x34, 0xe3, 0x57, 0x75, 0x68, 0x46, 0x74, 0x7b, 0x91, 0xd2, + 0x94, 0x5e, 0x08, 0x25, 0xac, 0x13, 0xc1, 0x04, 0x11, 0xa8, 0x77, 0x4c, 0xbf, 0x22, 0x2c, 0xaf, + 0x83, 0x8a, 0x81, 0x07, 0x9f, 0x6a, 0x60, 0x9c, 0xa8, 0xf8, 0x7e, 0x44, 0x0b, 0x0e, 0x05, 0x57, + 0x91, 0x81, 0x66, 0x13, 0xef, 0x48, 0x89, 0x86, 0xc7, 0x79, 0x50, 0xa4, 0x75, 0x90, 0x82, 0x7a, + 0x22, 0xa4, 0xbf, 0xbc, 0xd3, 0x92, 0x2c, 0x87, 0x22, 0xd8, 0x82, 0x3a, 0xec, 0xb5, 0x23, 0x27, + 0xaf, 0xf0, 0x68, 0x4b, 0x63, 0x50, 0x47, 0x06, 0x9a, 0x3d, 0x98, 0x3f, 0x33, 0x6f, 0x99, 0xc0, + 0x5c, 0xd2, 0x18, 0x3c, 0x61, 0x99, 0x7e, 0x41, 0xf8, 0xec, 0x90, 0x8e, 0x95, 0xb4, 0x60, 0x40, + 0x08, 0x1e, 0xd5, 0x01, 0xeb, 0xb3, 0xc9, 0x9e, 0xc0, 0xff, 0x0d, 0xb6, 0xc0, 0x12, 0x54, 0x15, + 0xad, 0x98, 0x3a, 0x34, 0x86, 0xb3, 0xd3, 0xf9, 0x8b, 0x5b, 0xdb, 0x8b, 0x7e, 0x4e, 0xe7, 0xf1, + 0x0e, 0xd6, 0xa9, 0x81, 0xf1, 0xbf, 0xd7, 0x2e, 0x02, 0x87, 0xcf, 0xc7, 0xf5, 0x08, 0xfc, 0xfc, + 0x1b, 0xc2, 0xa3, 0x2e, 0x37, 0x79, 
0x8a, 0x65, 0xdb, 0x79, 0x63, 0xbd, 0x7b, 0xeb, 0x7f, 0x5c, + 0xae, 0x6c, 0x47, 0x19, 0x68, 0x0f, 0x9b, 0xd6, 0x38, 0xb5, 0x21, 0x09, 0xea, 0x9c, 0x8b, 0x92, + 0x47, 0x58, 0x72, 0x2d, 0xff, 0xf2, 0xbd, 0xa3, 0x20, 0x0d, 0x37, 0xad, 0x21, 0xb9, 0x01, 0xcf, + 0x76, 0x40, 0xa6, 0x58, 0x5e, 0x7b, 0xce, 0xda, 0x5b, 0x2d, 0x9c, 0xcd, 0xc6, 0xb1, 0x95, 0x13, + 0x4d, 0x69, 0x5a, 0x43, 0x5e, 0x57, 0x50, 0x56, 0x34, 0x02, 0xc6, 0x20, 0x26, 0x8f, 0xf1, 0xc4, + 0x72, 0xdd, 0x95, 0x6f, 0xf9, 0x8e, 0xad, 0x8c, 0xb4, 0xb3, 0xa6, 0x35, 0x26, 0x56, 0x51, 0x50, + 0x1e, 0x70, 0x88, 0xbb, 0x45, 0x6c, 0x9c, 0xa5, 0xe5, 0xfa, 0x97, 0x0b, 0x65, 0xac, 0xc9, 0x4d, + 0x6b, 0x8c, 0x37, 0xb0, 0x0d, 0x0a, 0x9e, 0x45, 0xf3, 0x12, 0x4b, 0xb6, 0x38, 0x24, 0x92, 0xe0, + 0x7b, 0x62, 0x1a, 0x72, 0x7e, 0xb7, 0x5d, 0x1c, 0x2e, 0x43, 0x33, 0xef, 0x5a, 0xde, 0x7f, 0xd5, + 0x6b, 0xfd, 0xe6, 0x97, 0x3e, 0xb8, 0xd9, 0xeb, 0xe8, 0xc7, 0x5e, 0x47, 0x3f, 0xf7, 0xfa, 0xe0, + 0xfb, 0x6f, 0x1d, 0x7d, 0x18, 0x1f, 0xab, 0x43, 0x49, 0xa0, 0x97, 0x7f, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x64, 0x59, 0x48, 0x92, 0xe1, 0x02, 0x00, 0x00, +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/protocol/v1/legacy.go b/vendor/gopkg.in/bblfsh/sdk.v2/protocol/v1/legacy.go new file mode 100644 index 0000000..8959e5e --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/protocol/v1/legacy.go @@ -0,0 +1,178 @@ +package uast1 + +import ( + "fmt" + "sort" + "strings" + + uast1 "gopkg.in/bblfsh/sdk.v1/uast" + "gopkg.in/bblfsh/sdk.v2/uast" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/role" +) + +// ToNode converts a generic AST node to Node object used in the protocol. +func ToNode(n nodes.Node) (*uast1.Node, error) { + nd, err := asNode(n, "") + if err != nil { + return nil, err + } + switch len(nd) { + case 0: + return nil, nil + case 1: + return nd[0], nil + default: + return &uast1.Node{Children: nd}, nil + } +} + +func arrayAsNode(n nodes.Array, field string) ([]*uast1.Node, error) { + arr := make([]*uast1.Node, 0, len(n)) + for _, s := range n { + nd, err := asNode(s, field) + if err != nil { + return arr, err + } + arr = append(arr, nd...) + } + return arr, nil +} + +func pos(p *uast.Position) *uast1.Position { + return (*uast1.Position)(p) +} +func roles(arr role.Roles) []uast1.Role { + out := make([]uast1.Role, 0, len(arr)) + for _, r := range arr { + out = append(out, uast1.Role(r)) + } + return out +} + +func objectAsNode(n nodes.Object, field string) ([]*uast1.Node, error) { + ps := uast.PositionsOf(n) + typ := uast.TypeOf(n) + if i := strings.Index(typ, ":"); i >= 0 { + typ = typ[i+1:] + } + nd := &uast1.Node{ + InternalType: typ, + Token: uast.TokenOf(n), + Roles: roles(uast.RolesOf(n)), + StartPosition: pos(ps.Start()), + EndPosition: pos(ps.End()), + Properties: make(map[string]string), + } + if field != "" { + nd.Properties[uast1.InternalRoleKey] = field + } + for k, np := range ps { + switch k { + case uast.KeyStart, uast.KeyEnd: + // already processed + continue + } + sn, err := asNode(np.ToObject(), k) + if err != nil { + return nil, err + } + p := *pos(&np) + sp := p + ep := sp + ep.Col++ + ep.Offset++ + sn[0].StartPosition = &sp + sn[0].EndPosition = &ep + nd.Children = append(nd.Children, sn...) 
+ } + + for k, v := range n { + switch k { + case uast.KeyType, uast.KeyToken, uast.KeyRoles, uast.KeyPos: + // already processed + continue + } + if nv, ok := v.(nodes.Value); ok { + nd.Properties[k] = fmt.Sprint(nv.Native()) + } else { + sn, err := asNode(v, k) + if err != nil { + return nil, err + } + nd.Children = append(nd.Children, sn...) + } + } + sort.Stable(byOffset(nd.Children)) + return []*uast1.Node{nd}, nil +} + +func valueAsNode(n nodes.Value, field string) ([]*uast1.Node, error) { + nd := &uast1.Node{ + Token: fmt.Sprint(n), + Properties: make(map[string]string), + } + if field != "" { + nd.Properties[uast1.InternalRoleKey] = field + } + return []*uast1.Node{nd}, nil +} + +func asNode(n nodes.Node, field string) ([]*uast1.Node, error) { + switch n := n.(type) { + case nil: + return nil, nil + case nodes.Array: + return arrayAsNode(n, field) + case nodes.Object: + return objectAsNode(n, field) + case nodes.Value: + return valueAsNode(n, field) + default: + return nil, fmt.Errorf("argument should be a node or a list, got: %T", n) + } +} + +type byOffset []*uast1.Node + +func (s byOffset) Len() int { return len(s) } +func (s byOffset) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byOffset) Less(i, j int) bool { + a := s[i] + b := s[j] + apos := startPosition(a) + bpos := startPosition(b) + if apos != nil && bpos != nil { + if apos.Offset != bpos.Offset { + return apos.Offset < bpos.Offset + } + } else if (apos == nil && bpos != nil) || (apos != nil && bpos == nil) { + return bpos != nil + } + field1, ok1 := a.Properties[uast1.InternalRoleKey] + field2, ok2 := b.Properties[uast1.InternalRoleKey] + if ok1 && ok2 { + return field1 < field2 + } + return false +} + +func startPosition(n *uast1.Node) *uast1.Position { + if n.StartPosition != nil { + return n.StartPosition + } + + var min *uast1.Position + for _, c := range n.Children { + other := startPosition(c) + if other == nil { + continue + } + + if min == nil || other.Offset < min.Offset { + min = other + } + } + + return min +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/iter.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/iter.go new file mode 100644 index 0000000..9ed2960 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/iter.go @@ -0,0 +1,97 @@ +package uast + +import ( + "sort" + + "gopkg.in/bblfsh/sdk.v2/uast/nodes" +) + +// NewPositionalIterator creates a new iterator that enumerates all object nodes, sorting them by positions in the source file. +// Nodes with no positions will be enumerated last. 
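+// A minimal usage sketch (illustrative; root stands for any decoded UAST tree):
+//
+//    it := uast.NewPositionalIterator(root)
+//    for it.Next() {
+//        n := it.Node() // object nodes come out ordered by their start position
+//    }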
+func NewPositionalIterator(root nodes.External) nodes.Iterator { + return &positionIter{root: root} +} + +type positionIter struct { + root nodes.External + nodes []nodes.External +} + +type nodesByPos struct { + nodes []nodes.External + pos []Position +} + +func (arr nodesByPos) Len() int { + return len(arr.nodes) +} + +func (arr nodesByPos) Less(i, j int) bool { + return arr.pos[i].Less(arr.pos[j]) +} + +func (arr nodesByPos) Swap(i, j int) { + arr.nodes[i], arr.nodes[j] = arr.nodes[j], arr.nodes[i] + arr.pos[i], arr.pos[j] = arr.pos[j], arr.pos[i] +} + +func (it *positionIter) sort() { + // in general, we cannot expect that parent node's positional info will include all the children + // because of this we are forced to enumerate all nodes and sort them by positions on the first call to Next + var plist []Position + noPos := func() { + plist = append(plist, Position{}) + } + posType := TypeOf(Positions{}) + nodes.WalkPreOrderExt(it.root, func(n nodes.External) bool { + if n == nil || n.Kind() != nodes.KindObject { + return true + } + obj, ok := n.(nodes.ExternalObject) + if !ok { + return true + } + if TypeOf(n) == posType { + // skip position nodes + return false + } + it.nodes = append(it.nodes, n) + m, _ := obj.ValueAt(KeyPos) + if m == nil || m.Kind() != nodes.KindObject { + noPos() + return true + } + var ps Positions + if err := NodeAs(m, &ps); err != nil { + noPos() + return true + } + if p := ps.Start(); p != nil { + plist = append(plist, *p) + } else { + noPos() + } + return true + }) + sort.Sort(nodesByPos{nodes: it.nodes, pos: plist}) + plist = nil +} + +func (it *positionIter) Next() bool { + if it.nodes == nil { + it.sort() + return len(it.nodes) != 0 + } + if len(it.nodes) == 0 { + return false + } + it.nodes = it.nodes[1:] + return len(it.nodes) != 0 +} + +func (it *positionIter) Node() nodes.External { + if len(it.nodes) == 0 { + return nil + } + return it.nodes[0] +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/external.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/external.go new file mode 100644 index 0000000..5a4d02a --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/external.go @@ -0,0 +1,148 @@ +package nodes + +import ( + "fmt" +) + +// External is a node interface that can be implemented by other packages. +type External interface { + // Kind returns a node kind. + Kind() Kind + // Value returns a primitive value of the node or nil if node is not a value. + Value() Value + // SameAs check if the node is exactly the same node as n2. This usually means checking node pointers. + SameAs(n2 External) bool +} + +// ExternalArray is an analog of Array type. +type ExternalArray interface { + External + // Size returns the number of child nodes. + Size() int + // ValueAt returns a array value by an index. + ValueAt(i int) External +} + +// ExternalObject is an analog of Object type. +type ExternalObject interface { + External + // Size returns the number of fields in an object. + Size() int + // Keys returns a sorted list of keys (object field names). + Keys() []string + // ValueAt returns an object field by key. It returns false if key does not exist. + ValueAt(key string) (External, bool) +} + +// toNodeExt converts the external node to a native node type. +// The returned value is the copy of an original node. 
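+// It returns an error if the node reports an Object or Array kind but does not
+// implement the ExternalObject or ExternalArray interface.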
+func toNodeExt(n External) (Node, error) { + if n == nil { + return nil, nil + } + switch kind := n.Kind(); kind { + case KindNil: + return nil, nil + case KindObject: + o, ok := n.(ExternalObject) + if !ok { + return nil, fmt.Errorf("node type %T returns a %v kind, but doesn't implement the interface", n, kind) + } + keys := o.Keys() + m := make(Object, len(keys)) + for _, k := range keys { + nv, ok := o.ValueAt(k) + if !ok { + return nil, fmt.Errorf("node type %T: key %q is listed, but cannot be fetched", n, k) + } + v, err := toNodeExt(nv) + if err != nil { + return nil, err + } + m[k] = v + } + return m, nil + case KindArray: + a, ok := n.(ExternalArray) + if !ok { + return nil, fmt.Errorf("node type %T returns a %v kind, but doesn't implement the interface", n, kind) + } + sz := a.Size() + m := make(Array, sz) + for i := 0; i < sz; i++ { + nv := a.ValueAt(i) + v, err := toNodeExt(nv) + if err != nil { + return nil, err + } + m[i] = v + } + return m, nil + default: + return n.Value(), nil + } +} + +// equalExt compares two external nodes. +func equalExt(n1, n2 External) bool { + k1, k2 := n1.Kind(), n2.Kind() + switch k1 { + case KindObject: + if k2 != KindObject { + return false + } + o1, ok := n1.(ExternalObject) + if !ok { + return false + } + o2, ok := n2.(ExternalObject) + if !ok { + return false + } + if o1.Size() != o2.Size() { + return false + } + keys1, keys2 := o1.Keys(), o2.Keys() + m := make(map[string]struct{}, len(keys1)) + for _, k := range keys1 { + m[k] = struct{}{} + } + for _, k := range keys2 { + if _, ok := m[k]; !ok { + return false + } + v1, _ := o1.ValueAt(k) + v2, _ := o2.ValueAt(k) + if !Equal(v1, v2) { + return false + } + } + return true + case KindArray: + if k2 != KindArray { + return false + } + a1, ok := n1.(ExternalArray) + if !ok { + return false + } + a2, ok := n2.(ExternalArray) + if !ok { + return false + } + sz := a1.Size() + if sz != a2.Size() { + return false + } + for i := 0; i < sz; i++ { + v1 := a1.ValueAt(i) + v2 := a2.ValueAt(i) + if !Equal(v1, v2) { + return false + } + } + return true + default: + return Equal(n1.Value(), n2.Value()) + } +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/hash.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/hash.go new file mode 100644 index 0000000..93df1f3 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/hash.go @@ -0,0 +1,179 @@ +package nodes + +import ( + "crypto/sha256" + "encoding/binary" + "fmt" + "hash" + "io" + "math" + "sort" +) + +// HashSize is the size of hash used for nodes. +const HashSize = sha256.Size + +type Hash [HashSize]byte + +var DefaultHasher = NewHasher() + +// HashOf computes a hash of a node with all it's children. +// Shorthand for DefaultHasher.HashOf with default settings. +func HashOf(n External) Hash { + return DefaultHasher.HashOf(n) +} + +// NewHasher creates a new hashing config with default options. +func NewHasher() *Hasher { + return &Hasher{} +} + +// Hasher allows to configure node hashing. +type Hasher struct { + // KeyFilter allows to skip field in objects by returning false from the function. + // Hash will still reflect the presence or absence of these key, but it won't hash a value of that field. + KeyFilter func(key string) bool +} + +// HashOf computes a hash of a node with all it's children. +// Caller should not rely on a specific hash value, since the hash size and the algorithm might change. 
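+// A minimal usage sketch (illustrative; the "@pos" key name is hypothetical):
+//
+//    sum := nodes.HashOf(root) // DefaultHasher, hashes the whole subtree
+//
+//    h := nodes.NewHasher()
+//    h.KeyFilter = func(key string) bool { return key != "@pos" } // skip that field's value; its presence is still hashed
+//    sum2 := h.HashOf(root)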
+func (h *Hasher) HashOf(n External) Hash { + hash := sha256.New() + err := h.HashTo(hash, n) + if err != nil { + panic(err) + } + var v Hash + sz := len(hash.Sum(v[:0])) + if sz != HashSize { + panic("unexpected hash size") + } + return v +} + +// HashTo hashes the node with a custom hash function. See HashOf for details. +func (h *Hasher) HashTo(hash hash.Hash, n External) error { + return h.hashTo(hash, n) +} + +var hashEndianess = binary.LittleEndian + +func (h *Hasher) hashTo(w io.Writer, n External) error { + kind := KindOf(n) + + // write kind first (uint32) + var buf [4]byte + hashEndianess.PutUint32(buf[:], uint32(kind)) + if _, err := w.Write(buf[:]); err != nil { + return err + } + switch kind { + case KindNil: + return nil + case KindArray: + arr, ok := n.(ExternalArray) + if !ok { + return fmt.Errorf("node is an array, but an interface implementation is missing: %T", n) + } + return h.hashArray(w, arr) + case KindObject: + obj, ok := n.(ExternalObject) + if !ok { + return fmt.Errorf("node is an object, but an interface implementation is missing: %T", n) + } + return h.hashObject(w, obj) + } + if kind.In(KindsValues) { + v := n.Value() + return h.hashValue(w, v) + } + return fmt.Errorf("unsupported type: %T (%s)", n, kind) +} + +func (h *Hasher) hashArray(w io.Writer, arr ExternalArray) error { + sz := arr.Size() + var buf [4]byte + hashEndianess.PutUint32(buf[:], uint32(sz)) + _, err := w.Write(buf[:]) + if err != nil { + return err + } + for i := 0; i < sz; i++ { + v := arr.ValueAt(i) + if err = h.hashTo(w, v); err != nil { + return err + } + } + return nil +} + +func (h *Hasher) hashObject(w io.Writer, obj ExternalObject) error { + sz := obj.Size() + var buf [4]byte + hashEndianess.PutUint32(buf[:], uint32(sz)) + _, err := w.Write(buf[:]) + if err != nil { + return err + } + keys := obj.Keys() + if !sort.StringsAreSorted(keys) { + return fmt.Errorf("object keys are not sorted: %T", obj) + } + for _, key := range keys { + v, ok := obj.ValueAt(key) + if !ok { + return fmt.Errorf("key %q is listed, but the value is missing in %T", key, obj) + } + if err = h.hashValue(w, String(key)); err != nil { + return err + } + if h.KeyFilter != nil && !h.KeyFilter(key) { + continue + } + if err = h.hashTo(w, v); err != nil { + return err + } + } + return nil +} + +func (h *Hasher) hashValue(w io.Writer, v Value) error { + switch v := v.(type) { + case nil: + return nil + case Bool: + var err error + if v { + _, err = w.Write([]byte{1}) + } else { + _, err = w.Write([]byte{0}) + } + return err + case Int: + var buf [8]byte + hashEndianess.PutUint64(buf[:], uint64(v)) + _, err := w.Write(buf[:]) + return err + case Uint: + var buf [8]byte + hashEndianess.PutUint64(buf[:], uint64(v)) + _, err := w.Write(buf[:]) + return err + case Float: + var buf [8]byte + hashEndianess.PutUint64(buf[:], math.Float64bits(float64(v))) + _, err := w.Write(buf[:]) + return err + case String: + var buf [4]byte + hashEndianess.PutUint32(buf[:], uint32(len(v))) + _, err := w.Write(buf[:]) + if err != nil { + return err + } + _, err = w.Write([]byte(v)) + return err + default: + return fmt.Errorf("unsupported value type: %T (%s)", v, v.Kind()) + } +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/iter.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/iter.go new file mode 100644 index 0000000..d3a82ef --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/iter.go @@ -0,0 +1,213 @@ +package nodes + +import "fmt" + +type Iterator interface { + // Next advances an iterator. 
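+ // It returns false when there are no more nodes to visit.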
+ Next() bool + // Node returns a current node. + Node() External +} + +var _ Iterator = Empty{} + +type Empty struct{} + +func (Empty) Next() bool { return false } +func (Empty) Node() External { return nil } + +type IterOrder int + +const ( + IterAny = IterOrder(iota) + PreOrder + PostOrder + LevelOrder +) + +func NewIterator(root External, order IterOrder) Iterator { + if root == nil { + return Empty{} + } + if order == IterAny { + order = PreOrder + } + switch order { + case PreOrder: + it := &preOrderIter{} + it.push(root) + return it + case PostOrder: + it := &postOrderIter{} + it.start(root) + return it + case LevelOrder: + return &levelOrderIter{level: []External{root}, i: -1} + default: + panic(fmt.Errorf("unsupported iterator order: %v", order)) + } +} + +func eachChild(n External, fnc func(v External)) { + switch KindOf(n) { + case KindObject: + if m, ok := n.(ExternalObject); ok { + keys := m.Keys() + for _, k := range keys { + if v, _ := m.ValueAt(k); v != nil { + fnc(v) + } + } + } + case KindArray: + if m, ok := n.(ExternalArray); ok { + sz := m.Size() + for i := 0; i < sz; i++ { + if v := m.ValueAt(i); v != nil { + fnc(v) + } + } + } + } +} + +func eachChildRev(n External, fnc func(v External)) { + switch KindOf(n) { + case KindObject: + if m, ok := n.(ExternalObject); ok { + keys := m.Keys() + // reverse order + for i := len(keys) - 1; i >= 0; i-- { + if v, _ := m.ValueAt(keys[i]); v != nil { + fnc(v) + } + } + } + case KindArray: + if m, ok := n.(ExternalArray); ok { + sz := m.Size() + // reverse order + for i := sz - 1; i >= 0; i-- { + if v := m.ValueAt(i); v != nil { + fnc(v) + } + } + } + } +} + +type preOrderIter struct { + cur External + q []External +} + +func (it *preOrderIter) push(n External) { + if n == nil { + return + } + it.q = append(it.q, n) +} +func (it *preOrderIter) pop() External { + l := len(it.q) + if l == 0 { + return nil + } + n := it.q[l-1] + it.q = it.q[:l-1] + return n +} + +func (it *preOrderIter) Next() bool { + cur := it.cur + it.cur = nil + eachChildRev(cur, it.push) + it.cur = it.pop() + return KindOf(it.cur) != KindNil +} +func (it *preOrderIter) Node() External { + return it.cur +} + +type postOrderIter struct { + cur External + s [][]External +} + +func (it *postOrderIter) start(n External) { + kind := KindOf(n) + if kind == KindNil { + return + } + si := len(it.s) + q := []External{n} + it.s = append(it.s, nil) + eachChildRev(n, func(v External) { + q = append(q, v) + }) + if l := len(q); l > 1 { + it.start(q[l-1]) + q = q[:l-1] + } + it.s[si] = q +} + +func (it *postOrderIter) Next() bool { + down := false + for { + l := len(it.s) + if l == 0 { + return false + } + l-- + top := it.s[l] + if len(top) == 0 { + it.s = it.s[:l] + down = true + continue + } + i := len(top) - 1 + if down && i > 0 { + down = false + n := top[i] + it.s[l] = top[:i] + it.start(n) + continue + } + down = false + it.cur = top[i] + it.s[l] = top[:i] + return true + } +} +func (it *postOrderIter) Node() External { + return it.cur +} + +type levelOrderIter struct { + level []External + i int +} + +func (it *levelOrderIter) Next() bool { + if len(it.level) == 0 { + return false + } else if it.i+1 < len(it.level) { + it.i++ + return true + } + var next []External + for _, n := range it.level { + eachChild(n, func(v External) { + next = append(next, v) + }) + } + it.i = 0 + it.level = next + return len(it.level) > 0 +} +func (it *levelOrderIter) Node() External { + if it.i >= len(it.level) { + return nil + } + return it.level[it.i] +} diff --git 
a/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/node.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/node.go new file mode 100644 index 0000000..8fd3ef6 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/node.go @@ -0,0 +1,950 @@ +package nodes + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +const applySort = false + +// Equal compares two subtrees. +// Equality is checked by value (deep), not by reference. +func Equal(n1, n2 External) bool { + if n1 == nil && n2 == nil { + return true + } else if n1 == nil || n2 == nil { + return false + } + if Same(n1, n2) { + return true + } + if n, ok := n1.(Node); ok { + return n.Equal(n2) + } else if n, ok = n2.(Node); ok { + return n.Equal(n1) + } + return equalExt(n1, n2) +} + +// Node is a generic interface for a tree structure. +// +// Can be one of: +// * Object +// * Array +// * Value +type Node interface { + External + // Clone creates a deep copy of the node. + Clone() Node + // Native returns a native Go type for this node. + Native() interface{} + // Equal checks if the node is equal to another node. + // Equality is checked by value (deep), not by reference. + Equal(n2 External) bool + + isNode() // to limit possible implementations +} + +// Value is a generic interface for primitive values. +// +// Can be one of: +// * String +// * Int +// * Uint +// * Float +// * Bool +type Value interface { + Node + Comparable + isValue() // to limit possible types +} + +// NodePtr is an assignable node pointer. +type NodePtr interface { + Value + SetNode(v Node) error +} + +// Kind is a node kind. +type Kind int + +func (k Kind) Split() []Kind { + var kinds []Kind + for _, k2 := range []Kind{ + KindNil, + KindObject, + KindArray, + KindString, + KindInt, + KindUint, + KindFloat, + KindBool, + } { + if k.In(k2) { + kinds = append(kinds, k2) + } + } + if k2 := k &^ KindsAny; k2 != 0 { + kinds = append(kinds, k2) + } + return kinds +} +func (k Kind) In(k2 Kind) bool { + return (k & k2) != 0 +} +func (k Kind) String() string { + kinds := k.Split() + str := make([]string, 0, len(kinds)) + for _, k := range kinds { + var s string + switch k { + case KindNil: + s = "Nil" + case KindObject: + s = "Object" + case KindArray: + s = "Array" + case KindString: + s = "String" + case KindInt: + s = "Int" + case KindUint: + s = "Uint" + case KindFloat: + s = "Float" + case KindBool: + s = "Bool" + default: + s = fmt.Sprintf("Kind(%d)", int(k)) + } + str = append(str, s) + } + return strings.Join(str, " | ") +} + +const ( + KindNil = Kind(1 << iota) + KindObject + KindArray + KindString + KindInt + KindUint + KindFloat + KindBool +) + +const ( + KindsValues = KindString | KindInt | KindUint | KindFloat | KindBool + KindsComposite = KindObject | KindArray + KindsNotNil = KindsComposite | KindsValues + KindsAny = KindNil | KindsNotNil +) + +// KindOf returns a kind of the node. +func KindOf(n External) Kind { + if n == nil { + return KindNil + } + return n.Kind() +} + +var _ ExternalObject = Object{} + +// Object is a representation of generic node with fields. +type Object map[string]Node + +func (Object) isNode() {} + +func (Object) Kind() Kind { + return KindObject +} + +func (Object) Value() Value { + return nil +} + +func (m Object) Size() int { + return len(m) +} + +// Native converts an object to a generic Go map type (map[string]interface{}). 
+func (m Object) Native() interface{} { + if m == nil { + return nil + } + o := make(map[string]interface{}, len(m)) + for k, v := range m { + if v != nil { + o[k] = v.Native() + } else { + o[k] = nil + } + } + return o +} + +// Keys returns a sorted list of node keys. +func (m Object) Keys() []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (m Object) ValueAt(k string) (External, bool) { + v, ok := m[k] + if !ok { + return nil, false + } + return v, true +} + +// Clone returns a deep copy of an Object. +func (m Object) Clone() Node { + out := make(Object, len(m)) + for k, v := range m { + if v != nil { + out[k] = v.Clone() + } else { + out[k] = nil + } + } + return out +} + +// CloneObject clones this node only, without deep copy of field values. +func (m Object) CloneObject() Object { + out := make(Object, len(m)) + for k, v := range m { + out[k] = v + } + return out +} + +// Set is a helper for setting node properties. +func (m Object) Set(k string, v Node) Object { + m[k] = v + return m +} + +func (m *Object) SetNode(n Node) error { + if m2, ok := n.(Object); ok || n == nil { + *m = m2 + return nil + } + return fmt.Errorf("unexpected type: %T", n) +} + +func (m Object) Equal(n External) bool { + switch n := n.(type) { + case nil: + return false + case Object: + return m.EqualObject(n) + case Node: + // internal node, but not an object + return false + default: + // external node + if n.Kind() != KindObject { + return false + } + m2, ok := n.(ExternalObject) + if !ok { + return false + } + return m.equalObjectExt(m2) + } +} + +func (m Object) EqualObject(m2 Object) bool { + if len(m) != len(m2) { + return false + } + for k, v := range m { + if v2, ok := m2[k]; !ok || !Equal(v, v2) { + return false + } + } + return true +} + +func (m Object) equalObjectExt(m2 ExternalObject) bool { + if len(m) != m2.Size() { + return false + } + for _, k := range m2.Keys() { + v1, ok := m[k] + if !ok { + return false + } + v2, _ := m2.ValueAt(k) + if !Equal(v1, v2) { + return false + } + } + return true +} + +func (m Object) SameAs(n External) bool { + // this call relies on the fact that Same will never call SameAs on internal nodes. + return Same(m, n) +} + +var _ ExternalArray = Array{} + +// Array is an ordered list of nodes. +type Array []Node + +func (Array) isNode() {} + +func (Array) Kind() Kind { + return KindArray +} + +func (Array) Value() Value { + return nil +} + +func (m Array) Size() int { + return len(m) +} + +// Native converts an array to a generic Go slice type ([]interface{}). +func (m Array) Native() interface{} { + if m == nil { + return nil + } + o := make([]interface{}, 0, len(m)) + for _, v := range m { + if v != nil { + o = append(o, v.Native()) + } else { + o = append(o, nil) + } + } + return o +} + +func (m Array) ValueAt(i int) External { + if i < 0 || i >= len(m) { + return nil + } + return m[i] +} + +// Clone returns a deep copy of an Array. +func (m Array) Clone() Node { + out := make(Array, 0, len(m)) + for _, v := range m { + out = append(out, v.Clone()) + } + return out +} + +// CloneList creates a copy of an Array without copying it's elements. 
+func (m Array) CloneList() Array { + out := make(Array, 0, len(m)) + for _, v := range m { + out = append(out, v) + } + return out +} + +func (m Array) Equal(n External) bool { + switch n := n.(type) { + case nil: + return false + case Array: + return m.EqualArray(n) + case Node: + // internal node, but not an array + return false + default: + // external node + if n.Kind() != KindArray { + return false + } + m2, ok := n.(ExternalArray) + if !ok { + return false + } + return m.equalArrayExt(m2) + } +} + +func (m Array) EqualArray(m2 Array) bool { + if len(m) != len(m2) { + return false + } + for i, v := range m { + if !Equal(v, m2[i]) { + return false + } + } + return true +} + +func (m Array) equalArrayExt(m2 ExternalArray) bool { + if len(m) != m2.Size() { + return false + } + for i, v1 := range m { + v2 := m2.ValueAt(i) + if !Equal(v1, v2) { + return false + } + } + return true +} + +func (m Array) SameAs(n External) bool { + // this call relies on the fact that Same will never call SameAs on internal nodes. + return Same(m, n) +} + +func (m *Array) SetNode(n Node) error { + if m2, ok := n.(Array); ok || n == nil { + *m = m2 + return nil + } + return fmt.Errorf("unexpected type: %T", n) +} + +// String is a string value used in tree fields. +type String string + +func (String) isNode() {} +func (String) isValue() {} +func (String) isComparable() {} +func (String) Kind() Kind { + return KindString +} +func (v String) Value() Value { + return v +} + +// Native converts the value to a string. +func (v String) Native() interface{} { + return string(v) +} + +// Clone returns a copy of the value. +func (v String) Clone() Node { + return v +} + +func (v String) Equal(n External) bool { + switch n := n.(type) { + case nil: + return false + case String: + return v == n + case Node: + return false + default: + return v == n.Value() + } +} + +func (v *String) SetNode(n Node) error { + if v2, ok := n.(String); ok || n == nil { + *v = v2 + return nil + } + return fmt.Errorf("unexpected type: %T", n) +} + +func (v String) SameAs(n External) bool { + // this call relies on the fact that Same will never call SameAs on internal nodes. + return Same(v, n) +} + +// Int is a integer value used in tree fields. +type Int int64 + +func (Int) isNode() {} +func (Int) isValue() {} +func (Int) isComparable() {} +func (Int) Kind() Kind { + return KindInt +} +func (v Int) Value() Value { + return v +} + +// Native converts the value to an int64. +func (v Int) Native() interface{} { + return int64(v) +} + +// Clone returns a copy of the value. +func (v Int) Clone() Node { + return v +} + +func (v Int) Equal(n External) bool { + switch n := n.(type) { + case nil: + return false + case Int: + return v == n + case Uint: + if v < 0 { + return false + } + return Uint(v) == n + case Node: + return false + default: + return v == n.Value() + } +} + +func (v *Int) SetNode(n Node) error { + if v2, ok := n.(Int); ok || n == nil { + *v = v2 + return nil + } + return fmt.Errorf("unexpected type: %T", n) +} + +func (v Int) SameAs(n External) bool { + // this call relies on the fact that Same will never call SameAs on internal nodes. + return Same(v, n) +} + +// Uint is a unsigned integer value used in tree fields. +type Uint uint64 + +func (Uint) isNode() {} +func (Uint) isValue() {} +func (Uint) isComparable() {} +func (Uint) Kind() Kind { + return KindUint +} +func (v Uint) Value() Value { + return v +} + +// Native converts the value to an int64. 
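+// (For Uint, the native Go type is uint64.)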
+func (v Uint) Native() interface{} { + return uint64(v) +} + +// Clone returns a copy of the value. +func (v Uint) Clone() Node { + return v +} + +func (v Uint) Equal(n External) bool { + switch n := n.(type) { + case nil: + return false + case Uint: + return v == n + case Int: + if n < 0 { + return false + } + return v == Uint(n) + case Node: + return false + default: + return v == n.Value() + } +} + +func (v *Uint) SetNode(n Node) error { + if v2, ok := n.(Uint); ok || n == nil { + *v = v2 + return nil + } + return fmt.Errorf("unexpected type: %T", n) +} + +func (v Uint) SameAs(n External) bool { + // this call relies on the fact that Same will never call SameAs on internal nodes. + return Same(v, n) +} + +// Float is a floating point value used in tree fields. +type Float float64 + +func (Float) isNode() {} +func (Float) isValue() {} +func (Float) isComparable() {} +func (Float) Kind() Kind { + return KindFloat +} +func (v Float) Value() Value { + return v +} + +// Native converts the value to a float64. +func (v Float) Native() interface{} { + return float64(v) +} + +// Clone returns a copy of the value. +func (v Float) Clone() Node { + return v +} + +func (v Float) Equal(n External) bool { + switch n := n.(type) { + case nil: + return false + case Float: + return v == n + case Node: + return false + default: + return v == n.Value() + } +} + +func (v *Float) SetNode(n Node) error { + if v2, ok := n.(Float); ok || n == nil { + *v = v2 + return nil + } + return fmt.Errorf("unexpected type: %T", n) +} + +func (v Float) SameAs(n External) bool { + // this call relies on the fact that Same will never call SameAs on internal nodes. + return Same(v, n) +} + +// Bool is a boolean value used in tree fields. +type Bool bool + +func (Bool) isNode() {} +func (Bool) isValue() {} +func (Bool) isComparable() {} +func (Bool) Kind() Kind { + return KindBool +} +func (v Bool) Value() Value { + return v +} + +// Native converts the value to a bool. +func (v Bool) Native() interface{} { + return bool(v) +} + +// Clone returns a copy of the value. +func (v Bool) Clone() Node { + return v +} + +func (v Bool) Equal(n External) bool { + switch n := n.(type) { + case nil: + return false + case Bool: + return v == n + case Node: + return false + default: + return v == n.Value() + } +} + +func (v *Bool) SetNode(n Node) error { + if v2, ok := n.(Bool); ok || n == nil { + *v = v2 + return nil + } + return fmt.Errorf("unexpected type: %T", n) +} + +func (v Bool) SameAs(n External) bool { + // this call relies on the fact that Same will never call SameAs on internal nodes. + return Same(v, n) +} + +type ToNodeFunc func(interface{}) (Node, error) + +// ToNode converts objects returned by schema-less encodings such as JSON to Node objects. 
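+// A minimal usage sketch (illustrative; assumes JSON input in data and the encoding/json package):
+//
+//    var raw interface{}
+//    if err := json.Unmarshal(data, &raw); err != nil {
+//        return err
+//    }
+//    n, err := nodes.ToNode(raw, nil) // nil fallback: unsupported Go types produce an error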
+func ToNode(o interface{}, fallback ToNodeFunc) (Node, error) { + switch o := o.(type) { + case nil: + return nil, nil + case Node: + return o, nil + case External: + return toNodeExt(o) + case map[string]interface{}: + n := make(Object, len(o)) + for k, v := range o { + nv, err := ToNode(v, fallback) + if err != nil { + return nil, err + } + n[k] = nv + } + return n, nil + case []interface{}: + n := make(Array, 0, len(o)) + for _, v := range o { + nv, err := ToNode(v, fallback) + if err != nil { + return nil, err + } + n = append(n, nv) + } + return n, nil + case string: + return String(o), nil + case int: + return Int(o), nil + case int8: + return Int(o), nil + case int16: + return Int(o), nil + case int32: + return Int(o), nil + case int64: + return Int(o), nil + case uint: + return Uint(o), nil + case uint8: + return Uint(o), nil + case uint16: + return Uint(o), nil + case uint32: + return Uint(o), nil + case uint64: + return Uint(o), nil + case float32: + if float32(int64(o)) != o { + return Float(o), nil + } + return Int(o), nil + case float64: + if float64(int64(o)) != o { + return Float(o), nil + } + return Int(o), nil + case bool: + return Bool(o), nil + default: + if fallback != nil { + return fallback(o) + } + return nil, fmt.Errorf("unsupported type: %T", o) + } +} + +// ToString converts a value to a string. +func ToString(v Value) string { + switch v := v.(type) { + case nil: + return "" + case String: + return string(v) + case Int: + return strconv.FormatInt(int64(v), 10) + case Uint: + return strconv.FormatUint(uint64(v), 10) + case Bool: + if v { + return "true" + } + return "false" + case Float: + return strconv.FormatFloat(float64(v), 'g', -1, 64) + default: + return fmt.Sprint(v) + } +} + +// WalkPreOrder visits all nodes of the tree in pre-order. +func WalkPreOrder(root Node, walk func(Node) bool) { + if !walk(root) { + return + } + switch n := root.(type) { + case Object: + for _, k := range n.Keys() { + WalkPreOrder(n[k], walk) + } + case Array: + for _, s := range n { + WalkPreOrder(s, walk) + } + } +} + +// WalkPreOrderExt visits all nodes of the tree in pre-order. +func WalkPreOrderExt(root External, walk func(External) bool) { + if !walk(root) { + return + } + switch KindOf(root) { + case KindObject: + if n, ok := root.(ExternalObject); ok { + for _, k := range n.Keys() { + v, _ := n.ValueAt(k) + WalkPreOrderExt(v, walk) + } + } + case KindArray: + if n, ok := root.(ExternalArray); ok { + sz := n.Size() + for i := 0; i < sz; i++ { + v := n.ValueAt(i) + WalkPreOrderExt(v, walk) + } + } + } +} + +// Count returns a number of nodes with given kinds. +func Count(root External, kinds Kind) int { + var cnt int + WalkPreOrderExt(root, func(n External) bool { + if KindOf(n).In(kinds) { + cnt++ + } + return true + }) + return cnt +} + +// Apply takes a root node and applies callback to each node of the tree recursively. +// Apply returns an old or a new node and a flag that indicates if node was changed or not. +// If callback returns true and a new node, Apply will make a copy of parent node and +// will replace an old value with a new one. It will make a copy of all parent +// nodes recursively in this case. 
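+// A minimal usage sketch (illustrative; the replaced value is arbitrary):
+//
+//    out, changed := nodes.Apply(root, func(n nodes.Node) (nodes.Node, bool) {
+//        if s, ok := n.(nodes.String); ok && s == "old" {
+//            return nodes.String("new"), true
+//        }
+//        return n, false
+//    })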
+func Apply(root Node, apply func(n Node) (Node, bool)) (Node, bool) { + if root == nil { + return nil, false + } + var changed bool + switch n := root.(type) { + case Object: + var nn Object + if applySort { + for _, k := range n.Keys() { + v := n[k] + if nv, ok := Apply(v, apply); ok { + if nn == nil { + nn = n.CloneObject() + } + nn[k] = nv + } + } + } else { + for k, v := range n { + if nv, ok := Apply(v, apply); ok { + if nn == nil { + nn = n.CloneObject() + } + nn[k] = nv + } + } + } + if nn != nil { + changed = true + root = nn + } + case Array: + var nn Array + for i, v := range n { + if nv, ok := Apply(v, apply); ok { + if nn == nil { + nn = n.CloneList() + } + nn[i] = nv + } + } + if nn != nil { + changed = true + root = nn + } + } + nn, changed2 := apply(root) + return nn, changed || changed2 +} + +// Same check if two nodes represent exactly the same node. This usually means compare nodes by pointers. +func Same(n1, n2 External) bool { + if n1 == nil && n2 == nil { + return true + } else if n1 == nil || n2 == nil { + return false + } + if n1.Kind() != n2.Kind() { + return false + } + i1, ok := n1.(Node) + if !ok { + // first node is external, need to call SameAs on it + return n1.SameAs(n2) + } + i2, ok := n2.(Node) + if !ok { + // second node is external, need to call SameAs on it + return n2.SameAs(n1) + } + // both nodes are internal - compare unique key + return UniqueKey(i1) == UniqueKey(i2) +} + +// pointerOf returns a Go pointer for Node that is a reference type (Arrays and Objects). +func pointerOf(n Node) uintptr { + if n == nil { + return 0 + } + v := reflect.ValueOf(n) + if v.IsNil() { + return 0 + } + return v.Pointer() +} + +type arrayPtr uintptr + +func (arrayPtr) isComparable() {} + +type mapPtr uintptr + +func (mapPtr) isComparable() {} + +type unkPtr uintptr + +func (unkPtr) isComparable() {} + +// Comparable is an interface for comparable values that are guaranteed to be safely used as map keys. +type Comparable interface { + isComparable() +} + +// UniqueKey returns a unique key of the node in the current tree. The key can be used in maps. +func UniqueKey(n Node) Comparable { + switch n := n.(type) { + case nil: + return nil + case Value: + return n + default: + ptr := pointerOf(n) + // distinguish nil arrays and maps + switch n.(type) { + case Object: + return mapPtr(ptr) + case Array: + return arrayPtr(ptr) + } + return unkPtr(ptr) + } +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/nodes.pb.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/nodes.pb.go new file mode 100644 index 0000000..f13361a --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/nodes.pb.go @@ -0,0 +1,1214 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: nodes.proto + +/* + Package nodesproto is a generated protocol buffer package. + + It is generated from these files: + nodes.proto + + It has these top-level messages: + GraphHeader + Node +*/ +package nodesproto + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import binary "encoding/binary" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// GraphHeader is the first message written over the wire before transfering graph nodes. +// +// It should be preceded by the magic number "\x00bgr" (4 bytes) and the version number 0x1 +// written as a little-endian 4 byte integer. Next, the length of the header message should be +// written in varint encoding, directly followed by the message itself. +type GraphHeader struct { + // LastID is a last node ID used by ID allocator for this graph. Implementation may reserve some + // IDs space by setting LastID > max(nodes.ID). If not set, max(nodes.ID) is assumed. + // Tools that manipulate the graph, but want to preserve IDs of the nodes should allocate IDs + // starting from LastID+1. + LastId uint64 `protobuf:"varint,1,opt,name=last_id,json=lastId,proto3" json:"last_id,omitempty"` + // Root is an optional ID of a global root for this graph. This field is used to preserve + // compatibility with tools that expect a tree-shaped data. + // Implementation may also store multiple roots by referencing an Array node, or may store + // multiple named roots by referencing an Object node. + // If not set explicitly, implementations that expect a tree should search the graph for + // unused nodes of type Object or Array and treat them as an array of roots. + Root uint64 `protobuf:"varint,2,opt,name=root,proto3" json:"root,omitempty"` + // Metadata is an optional ID for a metadata node for this file. Should reference an Object. + // If set, implementations that expect a tree should exclude it from the list of roots. + Metadata uint64 `protobuf:"varint,3,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (m *GraphHeader) Reset() { *m = GraphHeader{} } +func (m *GraphHeader) String() string { return proto.CompactTextString(m) } +func (*GraphHeader) ProtoMessage() {} +func (*GraphHeader) Descriptor() ([]byte, []int) { return fileDescriptorNodes, []int{0} } + +func (m *GraphHeader) GetLastId() uint64 { + if m != nil { + return m.LastId + } + return 0 +} + +func (m *GraphHeader) GetRoot() uint64 { + if m != nil { + return m.Root + } + return 0 +} + +func (m *GraphHeader) GetMetadata() uint64 { + if m != nil { + return m.Metadata + } + return 0 +} + +// Node represents any node that can be stored in the graph. +// +// A list of Node messages follows directly after GraphHeader and each such message should be preceded by its length +// written in varint encoding. Nodes in the list should always be sorted by ID (ascending). +// ID of a node can be zero, in this case ID will be assigned automatically as prevNode.ID+1 (starting from 1). +// +// In general there is 3 kinds of nodes: Values, Arrays, Objects. +// If Value oneof field is set, all other fields are ignored, and node is decoded as a value node (leaf). +// If any of Keys or KeysFrom fields are set, the node is an Object (set of key-value pairs). +// Only Keys/KeysFrom and Values fields are considered in this case. +// In other cases a node is an Array, and only Values field is considered. +type Node struct { + // ID is a unique file-local ID of the node. + // To implement global IDs, application should write additional data to the graph + // or keep a mapping from file-local IDs to global ones. + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + // Value is a union for primitive value types. 
+ // These values are always leaf nodes, they should never store any references to other nodes, + // or other graph-specific information. + // It is assumed that encoded Value can be copied to a different Graph without any changes. + // + // Types that are valid to be assigned to Value: + // *Node_String_ + // *Node_Int + // *Node_Uint + // *Node_Float + // *Node_Bool + Value isNode_Value `protobuf_oneof:"value"` + // Keys is an ordered set of Object keys. Coresponding values are stored in Values field. + Keys []uint64 `protobuf:"varint,7,rep,packed,name=keys" json:"keys,omitempty"` + // KeysFrom can refer to a node ID previously seen on wire. In this case, Keys from that node + // are copied to Keys field of current node. Thus, full list of Keys can be omited. + KeysFrom uint64 `protobuf:"varint,10,opt,name=keys_from,json=keysFrom,proto3" json:"keys_from,omitempty"` + // Values stores an ordered list of node IDs. Zero ID represents a null node. + // For Array node this field represent an array itself, and for Object nodes + // this field is a set of values that coresponds to keys defined by Keys or KeysFrom. + Values []uint64 `protobuf:"varint,8,rep,packed,name=values" json:"values,omitempty"` + // IsObject is a helper field to distinguish between empty arrays and empty objects. + IsObject bool `protobuf:"varint,9,opt,name=is_object,json=isObject,proto3" json:"is_object,omitempty"` + // ValuesOffs is an offset added to all value IDs. Used for compression. + ValuesOffs uint64 `protobuf:"varint,11,opt,name=values_offs,json=valuesOffs,proto3" json:"values_offs,omitempty"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorNodes, []int{1} } + +type isNode_Value interface { + isNode_Value() + MarshalTo([]byte) (int, error) + ProtoSize() int +} + +type Node_String_ struct { + String_ string `protobuf:"bytes,2,opt,name=string,proto3,oneof"` +} +type Node_Int struct { + Int int64 `protobuf:"varint,3,opt,name=int,proto3,oneof"` +} +type Node_Uint struct { + Uint uint64 `protobuf:"varint,4,opt,name=uint,proto3,oneof"` +} +type Node_Float struct { + Float float64 `protobuf:"fixed64,5,opt,name=float,proto3,oneof"` +} +type Node_Bool struct { + Bool bool `protobuf:"varint,6,opt,name=bool,proto3,oneof"` +} + +func (*Node_String_) isNode_Value() {} +func (*Node_Int) isNode_Value() {} +func (*Node_Uint) isNode_Value() {} +func (*Node_Float) isNode_Value() {} +func (*Node_Bool) isNode_Value() {} + +func (m *Node) GetValue() isNode_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Node) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Node) GetString_() string { + if x, ok := m.GetValue().(*Node_String_); ok { + return x.String_ + } + return "" +} + +func (m *Node) GetInt() int64 { + if x, ok := m.GetValue().(*Node_Int); ok { + return x.Int + } + return 0 +} + +func (m *Node) GetUint() uint64 { + if x, ok := m.GetValue().(*Node_Uint); ok { + return x.Uint + } + return 0 +} + +func (m *Node) GetFloat() float64 { + if x, ok := m.GetValue().(*Node_Float); ok { + return x.Float + } + return 0 +} + +func (m *Node) GetBool() bool { + if x, ok := m.GetValue().(*Node_Bool); ok { + return x.Bool + } + return false +} + +func (m *Node) GetKeys() []uint64 { + if m != nil { + return m.Keys + } + return nil +} + +func (m *Node) GetKeysFrom() uint64 { + if m != nil { + return m.KeysFrom + } + return 0 +} + 
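+// A decoding sketch for the container format described on GraphHeader above
+// (illustrative only; br is assumed to be a *bufio.Reader over the input stream):
+//
+//    var magic [4]byte
+//    io.ReadFull(br, magic[:]) // expect "\x00bgr"
+//    var ver [4]byte
+//    io.ReadFull(br, ver[:]) // little-endian uint32, expect 1
+//    sz, _ := binary.ReadUvarint(br) // varint length of the GraphHeader message
+//    buf := make([]byte, sz)
+//    io.ReadFull(br, buf)
+//    var hdr GraphHeader
+//    hdr.Unmarshal(buf)
+//    // each following Node message is likewise preceded by its varint-encoded length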
+func (m *Node) GetValues() []uint64 { + if m != nil { + return m.Values + } + return nil +} + +func (m *Node) GetIsObject() bool { + if m != nil { + return m.IsObject + } + return false +} + +func (m *Node) GetValuesOffs() uint64 { + if m != nil { + return m.ValuesOffs + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Node) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Node_OneofMarshaler, _Node_OneofUnmarshaler, _Node_OneofSizer, []interface{}{ + (*Node_String_)(nil), + (*Node_Int)(nil), + (*Node_Uint)(nil), + (*Node_Float)(nil), + (*Node_Bool)(nil), + } +} + +func _Node_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Node) + // value + switch x := m.Value.(type) { + case *Node_String_: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.String_) + case *Node_Int: + _ = b.EncodeVarint(3<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Int)) + case *Node_Uint: + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Uint)) + case *Node_Float: + _ = b.EncodeVarint(5<<3 | proto.WireFixed64) + _ = b.EncodeFixed64(math.Float64bits(x.Float)) + case *Node_Bool: + t := uint64(0) + if x.Bool { + t = 1 + } + _ = b.EncodeVarint(6<<3 | proto.WireVarint) + _ = b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("Node.Value has unexpected type %T", x) + } + return nil +} + +func _Node_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Node) + switch tag { + case 2: // value.string + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &Node_String_{x} + return true, err + case 3: // value.int + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Node_Int{int64(x)} + return true, err + case 4: // value.uint + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Node_Uint{x} + return true, err + case 5: // value.float + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = &Node_Float{math.Float64frombits(x)} + return true, err + case 6: // value.bool + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Node_Bool{x != 0} + return true, err + default: + return false, nil + } +} + +func _Node_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Node) + // value + switch x := m.Value.(type) { + case *Node_String_: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.String_))) + n += len(x.String_) + case *Node_Int: + n += proto.SizeVarint(3<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Int)) + case *Node_Uint: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Uint)) + case *Node_Float: + n += proto.SizeVarint(5<<3 | proto.WireFixed64) + n += 8 + case *Node_Bool: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*GraphHeader)(nil), "gopkg.in.bblfsh.sdk.v2.uast.nodes.GraphHeader") + proto.RegisterType((*Node)(nil), 
"gopkg.in.bblfsh.sdk.v2.uast.nodes.Node") +} +func (m *GraphHeader) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GraphHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LastId != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintNodes(dAtA, i, uint64(m.LastId)) + } + if m.Root != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintNodes(dAtA, i, uint64(m.Root)) + } + if m.Metadata != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintNodes(dAtA, i, uint64(m.Metadata)) + } + return i, nil +} + +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.ProtoSize() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Id != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintNodes(dAtA, i, uint64(m.Id)) + } + if m.Value != nil { + nn1, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + if len(m.Keys) > 0 { + dAtA3 := make([]byte, len(m.Keys)*10) + var j2 int + for _, num := range m.Keys { + for num >= 1<<7 { + dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j2++ + } + dAtA3[j2] = uint8(num) + j2++ + } + dAtA[i] = 0x3a + i++ + i = encodeVarintNodes(dAtA, i, uint64(j2)) + i += copy(dAtA[i:], dAtA3[:j2]) + } + if len(m.Values) > 0 { + dAtA5 := make([]byte, len(m.Values)*10) + var j4 int + for _, num := range m.Values { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + dAtA[i] = 0x42 + i++ + i = encodeVarintNodes(dAtA, i, uint64(j4)) + i += copy(dAtA[i:], dAtA5[:j4]) + } + if m.IsObject { + dAtA[i] = 0x48 + i++ + if m.IsObject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.KeysFrom != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintNodes(dAtA, i, uint64(m.KeysFrom)) + } + if m.ValuesOffs != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintNodes(dAtA, i, uint64(m.ValuesOffs)) + } + return i, nil +} + +func (m *Node_String_) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x12 + i++ + i = encodeVarintNodes(dAtA, i, uint64(len(m.String_))) + i += copy(dAtA[i:], m.String_) + return i, nil +} +func (m *Node_Int) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x18 + i++ + i = encodeVarintNodes(dAtA, i, uint64(m.Int)) + return i, nil +} +func (m *Node_Uint) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + i = encodeVarintNodes(dAtA, i, uint64(m.Uint)) + return i, nil +} +func (m *Node_Float) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x29 + i++ + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Float)))) + i += 8 + return i, nil +} +func (m *Node_Bool) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x30 + i++ + if m.Bool { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} +func encodeVarintNodes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *GraphHeader) ProtoSize() (n int) { + var l int + _ = l + if m.LastId != 0 { + n += 1 + sovNodes(uint64(m.LastId)) + } + if m.Root != 0 { + n += 1 + sovNodes(uint64(m.Root)) + } + if m.Metadata != 0 { + n += 1 + sovNodes(uint64(m.Metadata)) + } + 
return n +} + +func (m *Node) ProtoSize() (n int) { + var l int + _ = l + if m.Id != 0 { + n += 1 + sovNodes(uint64(m.Id)) + } + if m.Value != nil { + n += m.Value.ProtoSize() + } + if len(m.Keys) > 0 { + l = 0 + for _, e := range m.Keys { + l += sovNodes(uint64(e)) + } + n += 1 + sovNodes(uint64(l)) + l + } + if len(m.Values) > 0 { + l = 0 + for _, e := range m.Values { + l += sovNodes(uint64(e)) + } + n += 1 + sovNodes(uint64(l)) + l + } + if m.IsObject { + n += 2 + } + if m.KeysFrom != 0 { + n += 1 + sovNodes(uint64(m.KeysFrom)) + } + if m.ValuesOffs != 0 { + n += 1 + sovNodes(uint64(m.ValuesOffs)) + } + return n +} + +func (m *Node_String_) ProtoSize() (n int) { + var l int + _ = l + l = len(m.String_) + n += 1 + l + sovNodes(uint64(l)) + return n +} +func (m *Node_Int) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovNodes(uint64(m.Int)) + return n +} +func (m *Node_Uint) ProtoSize() (n int) { + var l int + _ = l + n += 1 + sovNodes(uint64(m.Uint)) + return n +} +func (m *Node_Float) ProtoSize() (n int) { + var l int + _ = l + n += 9 + return n +} +func (m *Node_Bool) ProtoSize() (n int) { + var l int + _ = l + n += 2 + return n +} + +func sovNodes(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozNodes(x uint64) (n int) { + return sovNodes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GraphHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GraphHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GraphHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastId", wireType) + } + m.LastId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastId |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + m.Root = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Root |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + m.Metadata = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Metadata |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipNodes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNodes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = &Node_String_{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Int", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &Node_Int{v} + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uint", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &Node_Uint{v} + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Float", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &Node_Float{float64(math.Float64frombits(v))} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Bool", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Value = &Node_Bool{b} + case 7: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Keys = append(m.Keys, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthNodes + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Keys = append(m.Keys, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + case 8: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthNodes + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsObject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IsObject = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeysFrom", wireType) + } + m.KeysFrom = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeysFrom |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValuesOffs", wireType) + } + m.ValuesOffs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNodes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValuesOffs |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipNodes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNodes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipNodes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodes + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthNodes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNodes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipNodes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthNodes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNodes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("nodes.proto", fileDescriptorNodes) } + +var fileDescriptorNodes = []byte{ + // 367 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x91, 0xc1, 0x8e, 0xd3, 0x30, + 0x10, 0x86, 0xe3, 0x36, 0x4d, 0xdb, 0xa9, 0xc4, 0xc1, 0x42, 0x8b, 0xb5, 0xa0, 0x10, 0xf6, 0xd4, + 0x0b, 0x5e, 0x09, 0xde, 0x60, 0x0f, 0x10, 0x2e, 0xac, 0x94, 0x03, 0x07, 0x2e, 0x91, 0xb3, 0xb6, + 0x53, 0xd3, 0x24, 0x53, 0xc5, 0xce, 0x4a, 0xbc, 0x09, 0x47, 0x1e, 0x87, 0x23, 0x8f, 0x80, 0xca, + 0x8b, 0xac, 0x3c, 0x59, 0xf5, 0xe4, 0xf9, 0xfe, 0x99, 0xff, 0xd7, 0x58, 0x03, 0xbb, 0x01, 0xb5, + 0xf1, 0xf2, 0x34, 0x62, 0x40, 0xfe, 0xae, 0xc5, 0xd3, 0xb1, 0x95, 0x6e, 0x90, 0x4d, 0xd3, 0x59, + 0x7f, 0x90, 0x5e, 0x1f, 0xe5, 0xe3, 0x07, 0x39, 0x29, 0x1f, 0x24, 0x0d, 0x5e, 0xbf, 0x6f, 0x5d, + 0x38, 0x4c, 0x8d, 0x7c, 0xc0, 0xfe, 0xb6, 0xc5, 0x16, 0x6f, 0xc9, 0xd9, 0x4c, 0x96, 0x88, 0x80, + 0xaa, 0x39, 0xf1, 0xe6, 0x1b, 0xec, 0x3e, 0x8f, 0xea, 0x74, 0x28, 0x8d, 0xd2, 0x66, 0xe4, 0xaf, + 0x60, 0xdd, 0x29, 0x1f, 0x6a, 0xa7, 0x05, 0x2b, 0xd8, 0x3e, 0xad, 0xb2, 0x88, 0x5f, 0x34, 0xe7, + 0x90, 0x8e, 0x88, 0x41, 0x2c, 0x48, 0xa5, 0x9a, 0x5f, 0xc3, 0xa6, 0x37, 0x41, 0x69, 0x15, 0x94, + 0x58, 0x92, 0x7e, 0xe1, 0x9b, 0x5f, 0x0b, 0x48, 0xbf, 0xa2, 0x36, 0xfc, 0x05, 0x2c, 0x2e, 0x61, + 0x0b, 0xa7, 0xb9, 0x80, 0xcc, 0x87, 0xd1, 0x0d, 0x2d, 0x45, 0x6d, 0xcb, 0xa4, 0x7a, 0x66, 0xce, + 0x61, 0xe9, 0x86, 0x40, 0x49, 0xcb, 0x32, 0xa9, 0x22, 0xf0, 0x97, 0x90, 0x4e, 0x51, 0x4c, 0xa3, + 0xbf, 0x4c, 0x2a, 0x22, 0x7e, 0x05, 0x2b, 0xdb, 0xa1, 0x0a, 0x62, 0x55, 0xb0, 0x3d, 0x2b, 0x93, + 0x6a, 0xc6, 0x38, 0xdd, 0x20, 0x76, 0x22, 0x2b, 0xd8, 0x7e, 0x13, 0xa7, 0x23, 0xc5, 0xd5, 0x8f, + 0xe6, 0xa7, 0x17, 0xeb, 0x62, 0x19, 0x57, 0x8f, 0x35, 0xbf, 0x82, 0xec, 0x51, 0x75, 0x93, 0xf1, + 0x62, 0x43, 0xea, 0x33, 0xf1, 0xd7, 0xb0, 0x75, 
0xbe, 0xc6, 0xe6, 0x87, 0x79, 0x08, 0x62, 0x1b, + 0x63, 0xaa, 0x8d, 0xf3, 0xf7, 0xc4, 0xb1, 0x19, 0xcd, 0xb5, 0x1d, 0xb1, 0x17, 0x30, 0x7f, 0x38, + 0x0a, 0x9f, 0x46, 0xec, 0xf9, 0x5b, 0xd8, 0xcd, 0x19, 0x35, 0x5a, 0xeb, 0xc5, 0x8e, 0xda, 0x30, + 0x4b, 0xf7, 0xd6, 0xfa, 0xbb, 0x35, 0xac, 0x88, 0xee, 0xde, 0xfc, 0x39, 0xe7, 0xec, 0xef, 0x39, + 0x67, 0xff, 0xce, 0x79, 0xf2, 0xfb, 0x7f, 0xce, 0xbe, 0x03, 0x9d, 0x6e, 0x3e, 0x54, 0x46, 0xcf, + 0xc7, 0xa7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x97, 0x1c, 0x32, 0xf8, 0x01, 0x00, 0x00, +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/nodesproto.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/nodesproto.go new file mode 100644 index 0000000..b0e530d --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/nodesproto.go @@ -0,0 +1,601 @@ +package nodesproto + +import ( + "crypto/md5" + "encoding/binary" + "fmt" + "io" + "sort" + + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/pio" +) + +//go:generate protoc --proto_path=$GOPATH/src:. --gogo_out=. nodes.proto + +const ( + magic = "\x00bgr" + version = 0x1 +) + +const ( + keysDiff = false + valsOffs = true + dedupNodes = true +) + +const stats = true + +var ( + MapSize int + ArrSize int + ValSize int + DelimSize int + + KeysCnt int + KeysFromCnt int + DupsCnt int +) + +func WriteTo(w io.Writer, n nodes.Node) error { + var header [8]byte + copy(header[:4], magic) + binary.LittleEndian.PutUint32(header[4:], version) + _, err := w.Write(header[:]) + if err != nil { + return err + } + + tw := newTreeWriter() + root := tw.addNode(n) + + gh := &GraphHeader{ + LastId: uint64(len(tw.nodes) + 1), + Root: root, + } + + pw := pio.NewWriter(w) + + _, err = pw.WriteMsg(gh) + if err != nil { + return err + } + var lastID uint64 + for _, n := range tw.nodes { + id := n.Id + if id == lastID+1 { + n.Id = 0 // omit the field + } + lastID = id + + if stats { + sz := n.ProtoSize() + if n.Value != nil { + ValSize += sz + } else if len(n.Keys) != 0 || n.KeysFrom != 0 { + if n.KeysFrom != 0 { + KeysFromCnt++ + } else { + KeysCnt++ + } + MapSize += sz + } else { + ArrSize += sz + } + DelimSize += sovNodes(uint64(sz)) + } + + _, err = pw.WriteMsg(n) + if err != nil { + return err + } + } + return nil +} + +func newTreeWriter() *treeWriter { + return &treeWriter{ + vals: make(map[nodes.Value]uint64), + keys: make(map[khash]uint64), + dups: make(map[[2]khash]uint64), + } +} + +type khash [md5.Size]byte + +func hashUints(arr []uint64) khash { + h := md5.New() + var p [8]byte + for _, v := range arr { + binary.LittleEndian.PutUint64(p[:], v) + h.Write(p[:]) + } + var kh khash + _ = h.Sum(kh[:0]) + return kh +} + +type treeWriter struct { + nodes []*Node + vals map[nodes.Value]uint64 + keys map[khash]uint64 + dups map[[2]khash]uint64 +} + +func newVal(id uint64, v nodes.Value) *Node { + var pv isNode_Value + switch v := v.(type) { + case nodes.String: + pv = &Node_String_{String_: string(v)} + case nodes.Int: + pv = &Node_Int{Int: int64(v)} + case nodes.Uint: + pv = &Node_Uint{Uint: uint64(v)} + case nodes.Bool: + pv = &Node_Bool{Bool: bool(v)} + case nodes.Float: + pv = &Node_Float{Float: float64(v)} + default: + panic(fmt.Errorf("unexpected type: %T", v)) + } + return &Node{Id: id, Value: pv} +} + +type ksSort struct { + keys []uint64 + skeys []string +} + +func (arr ksSort) Len() int { + return len(arr.keys) +} + +func (arr ksSort) Less(i, j int) bool { + return arr.keys[i] < arr.keys[j] +} + +func (arr ksSort) Swap(i, j int) { + arr.keys[i], 
arr.keys[j] = arr.keys[j], arr.keys[i] + arr.skeys[i], arr.skeys[j] = arr.skeys[j], arr.skeys[i] +} + +func (g *treeWriter) mapObj(id uint64, n nodes.Object) (*Node, uint64) { + var ( + keys = make([]uint64, 0, len(n)) + vals = make([]uint64, 0, len(n)) + ) + skeys := n.Keys() + + for _, k := range skeys { + kid := g.addNode(nodes.String(k)) + keys = append(keys, kid) + } + sort.Sort(ksSort{keys: keys, skeys: skeys}) + + nd := &Node{Id: id, Keys: keys, IsObject: len(n) == 0} + + keyh := hashUints(keys) + if oid, ok := g.keys[keyh]; ok { + nd.Keys, nd.KeysFrom = nil, oid + } else { + g.keys[keyh] = id + if keysDiff && len(nd.Keys) > 1 { + cur := nd.Keys[0] + for i := 1; i < len(nd.Keys); i++ { + v := nd.Keys[i] + nd.Keys[i] = v - cur + cur = v + } + } + } + + var minVal uint64 + for i, k := range skeys { + v := n[k] + vid := g.addNode(v) + if valsOffs && (vid < minVal || i == 0) { + minVal = vid + } + vals = append(vals, vid) + } + if dedupNodes { + valh := hashUints(vals) + if oid, ok := g.dups[[2]khash{keyh, valh}]; ok { + if stats { + DupsCnt++ + } + return nil, oid + } + g.dups[[2]khash{keyh, valh}] = id + } + if minVal != 0 { + diff := sovNodes(minVal) + 1 + for i := range vals { + diff += sovNodes(vals[i]-minVal) - sovNodes(vals[i]) + } + if diff < 0 { + for i := range vals { + vals[i] -= minVal + } + } else { + minVal = 0 + } + } + nd.Values, nd.ValuesOffs = vals, minVal + return nd, id +} +func (g *treeWriter) mapArr(id uint64, n nodes.Array) (*Node, uint64) { + ids := make([]uint64, 0, len(n)) + for _, s := range n { + vid := g.addNode(s) + ids = append(ids, vid) + } + if dedupNodes { + keyh, valh := khash{}, hashUints(ids) + if oid, ok := g.dups[[2]khash{keyh, valh}]; ok { + if stats { + DupsCnt++ + } + return nil, oid + } + g.dups[[2]khash{keyh, valh}] = id + } + return &Node{Id: id, Values: ids}, id +} +func (g *treeWriter) addNode(n nodes.Node) uint64 { + if n == nil { + return 0 + } else if v, ok := n.(nodes.Value); ok { + if id, ok := g.vals[v]; ok { + return id + } + } + g.nodes = append(g.nodes, nil) + id := uint64(len(g.nodes)) + csz := len(g.nodes) + + var nd *Node + switch n := n.(type) { + case nodes.Object: + var oid uint64 + nd, oid = g.mapObj(id, n) + if nd == nil { + if csz != len(g.nodes) { + panic(fmt.Errorf("unexpected node count")) + } + g.nodes = g.nodes[:len(g.nodes)-1] + return oid + } + case nodes.Array: + var oid uint64 + nd, oid = g.mapArr(id, n) + if nd == nil { + if csz != len(g.nodes) { + panic(fmt.Errorf("unexpected node count")) + } + g.nodes = g.nodes[:len(g.nodes)-1] + return oid + } + case nodes.Value: + nd = newVal(id, n) + default: + panic(fmt.Errorf("unexpected type: %T", n)) + } + g.nodes[id-1] = nd + if v, ok := n.(nodes.Value); ok { + g.vals[v] = id + } + return id +} + +// ReadTree reads a binary graph from r and tries to decode it as a tree. +// If the graph is cyclic, an error is returned. +func ReadTree(r io.Reader) (nodes.Node, error) { + g := newGraphReader() + if err := g.readGraph(r); err != nil { + return nil, err + } + return g.asTree() +} + +type RawNode struct { + ID uint64 `json:"id"` + Kind nodes.Kind `json:"kind"` + Value nodes.Value `json:"val,omitempty"` + Keys []uint64 `json:"keys,omitempty"` + Values []uint64 `json:"values,omitempty"` +} + +type RawGraph struct { + Root uint64 `json:"root,omitempty"` + Meta uint64 `json:"meta,omitempty"` + Last uint64 `json:"last,omitempty"` + Nodes map[uint64]RawNode `json:"nodes"` +} + +// ReadRaw reads a graph from a binary stream, returning a flat list of all nodes. 
+func ReadRaw(r io.Reader) (*RawGraph, error) { + g := newGraphReader() + if err := g.readGraph(r); err != nil { + return nil, err + } + rg := &RawGraph{ + Root: g.root, Meta: g.meta, Last: g.last, + Nodes: make(map[uint64]RawNode, len(g.nodes)), + } + for id, n := range g.nodes { + nd := RawNode{ID: id, Kind: n.Kind()} + switch nd.Kind { + case nodes.KindObject: + nd.Keys = n.Keys + nd.Values = n.Values + case nodes.KindArray: + nd.Values = n.Values + default: + v, err := asValue(n) + if err != nil { + return rg, err + } + nd.Value = v + } + rg.Nodes[nd.ID] = nd + } + return rg, nil +} + +func newGraphReader() *graphReader { + return &graphReader{} +} + +type graphReader struct { + nodes map[uint64]*Node + detached []uint64 + root uint64 + meta uint64 + last uint64 +} + +func (g *graphReader) readHeader(r io.Reader) error { + var b [8]byte + n, err := r.Read(b[:]) + if err == io.EOF { + return io.ErrUnexpectedEOF + } else if err != nil { + return err + } else if n != len(b) { + return fmt.Errorf("short read") + } + if string(b[:4]) != magic { + return fmt.Errorf("not a graph file") + } + vers := binary.LittleEndian.Uint32(b[4:]) + if vers != version { + return fmt.Errorf("unsupported version: %x", vers) + } + return nil +} +func (g *graphReader) readGraph(r io.Reader) error { + if err := g.readHeader(r); err != nil { + return err + } + pr := pio.NewReader(r, 10*1024*1024) + var gh GraphHeader + if err := pr.ReadMsg(&gh); err != nil { + return err + } + g.last, g.root, g.meta = gh.LastId, gh.Root, gh.Metadata + var ( + prevID uint64 + nodes = make(map[uint64]*Node) + ) + for { + nd := &Node{} + if err := pr.ReadMsg(nd); err == io.EOF { + break + } else if err != nil { + return err + } + + if nd.Id == 0 { + // allow to omit ID + nd.Id = prevID + 1 + } else if prevID >= nd.Id { + // but IDs should be ascending + return fmt.Errorf("node IDs should be ascending") + } + prevID = nd.Id + // there should be no duplicates + if _, ok := nodes[nd.Id]; ok { + return fmt.Errorf("duplicate node with id %d", nd.Id) + } + // support KeysFrom + if nd.KeysFrom != 0 { + n2, ok := nodes[nd.KeysFrom] + if !ok { + return fmt.Errorf("KeysFrom refers to an undefined node %d", nd.KeysFrom) + } + nd.Keys = n2.Keys + } else if keysDiff && len(nd.Keys) > 1 { + cur := nd.Keys[0] + for i := 1; i < len(nd.Keys); i++ { + v := nd.Keys[i] + v += cur + nd.Keys[i] = v + cur = v + } + } + // support ValuesOffs + if nd.ValuesOffs != 0 { + for i := range nd.Values { + nd.Values[i] += nd.ValuesOffs + } + } + + nodes[nd.Id] = nd + } + refs := make(map[uint64]struct{}, len(nodes)) + roots := make(map[uint64]struct{}) + use := func(id uint64) { + refs[id] = struct{}{} + delete(roots, id) + } + + for _, n := range nodes { + for _, id := range n.Keys { + use(id) + } + for _, id := range n.Values { + use(id) + } + if _, ok := refs[n.Id]; !ok { + roots[n.Id] = struct{}{} + } + } + arr := make([]uint64, 0, len(roots)) + for id := range roots { + arr = append(arr, id) + } + sort.Slice(arr, func(i, j int) bool { + return arr[i] < arr[j] + }) + g.nodes = nodes + g.detached = arr + // root is optional - detect automatically if not set + if g.root == 0 && len(g.detached) != 0 { + if len(g.detached) == 1 { + g.root = g.detached[0] + } else { + g.last++ + id := g.last + g.nodes[id] = &Node{Id: id, Values: g.detached} + } + } + return nil +} + +func (g *graphReader) asTree() (nodes.Node, error) { + if g.root == 0 { + return nil, nil + } + seen := make(map[uint64]bool, len(g.nodes)) + return g.asNode(g.root, seen) +} + +func (m *Node) Kind() 
nodes.Kind { + if m.Value != nil { + v, _ := asValue(m) + return nodes.KindOf(v) + } + if len(m.Keys) != 0 || m.IsObject { + return nodes.KindObject + } + return nodes.KindArray +} + +func asValue(n *Node) (nodes.Value, error) { + switch n := n.Value.(type) { + case *Node_String_: + return nodes.String(n.String_), nil + case *Node_Int: + return nodes.Int(n.Int), nil + case *Node_Uint: + return nodes.Uint(n.Uint), nil + case *Node_Bool: + return nodes.Bool(n.Bool), nil + case *Node_Float: + return nodes.Float(n.Float), nil + } + return nil, fmt.Errorf("unsupported node type: %T", n.Value) +} +func (g *graphReader) asNode(id uint64, seen map[uint64]bool) (nodes.Node, error) { + if id == 0 { + return nil, nil + } + n, ok := g.nodes[id] + if !ok { + return nil, fmt.Errorf("node %v is not defined", id) + } + if n.Value == nil { + // loops are not allowed + if leaf, ok := seen[id]; ok && !leaf { + return nil, fmt.Errorf("not a tree") + } + } + isLeaf := func() bool { + for _, sid := range n.Keys { + if sid == 0 { + continue + } + if l, ok := seen[sid]; ok { + if l { + continue + } + return false + } + if g.nodes[sid].Value == nil { + return false + } + } + for _, sid := range n.Values { + if sid == 0 { + continue + } + if l, ok := seen[sid]; ok { + if l { + continue + } + return false + } + if g.nodes[sid].Value == nil { + return false + } + } + return true + } + leaf := n.Value != nil + if !leaf { + leaf = isLeaf() + } + seen[id] = leaf + if n.Value != nil { + return asValue(n) + } + var out nodes.Node + if n.Kind() == nodes.KindObject { + if len(n.Keys) != len(n.Values) { + return nil, fmt.Errorf("number of keys doesn't match a number of values: %d vs %d", len(n.Keys), len(n.Values)) + } + m := make(nodes.Object, len(n.Keys)) + for i, k := range n.Keys { + nk, err := g.asNode(k, seen) + if err != nil { + return nil, err + } + sk, ok := nk.(nodes.String) + if !ok { + return nil, fmt.Errorf("only string keys are supported") + } + v := n.Values[i] + nv, err := g.asNode(v, seen) + if err != nil { + return nil, err + } + m[string(sk)] = nv + } + out = m + } else { + m := make(nodes.Array, 0, len(n.Values)) + for _, v := range n.Values { + nv, err := g.asNode(v, seen) + if err != nil { + return nil, err + } + m = append(m, nv) + } + out = m + } + if !leaf && isLeaf() { + seen[id] = true + } + return out, nil +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/pio/io.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/pio/io.go new file mode 100644 index 0000000..813aa2d --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/pio/io.go @@ -0,0 +1,60 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package pio + +import ( + "github.com/gogo/protobuf/proto" +) + +type Writer interface { + WriteMsg(proto.Message) (int, error) +} + +type Reader interface { + ReadMsg(msg proto.Message) error + SkipMsg() error +} + +type marshaler interface { + MarshalTo(data []byte) (n int, err error) +} + +func getSize(v interface{}) (int, bool) { + if sz, ok := v.(interface { + Size() (n int) + }); ok { + return sz.Size(), true + } else if sz, ok := v.(interface { + ProtoSize() (n int) + }); ok { + return sz.ProtoSize(), true + } else { + return 0, false + } +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/pio/varint.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/pio/varint.go new file mode 100644 index 0000000..7344882 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/nodes/nodesproto/pio/varint.go @@ -0,0 +1,145 @@ +// Extensions for Protocol Buffers to create more go like structures. +// +// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// http://github.com/gogo/protobuf/gogoproto +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package pio + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + + "github.com/gogo/protobuf/proto" +) + +var ( + errSmallBuffer = errors.New("Buffer Too Small") + errLargeValue = errors.New("Value is Larger than 64 bits") +) + +func NewWriter(w io.Writer) Writer { + return &varintWriter{w: w, lenBuf: make([]byte, binary.MaxVarintLen64)} +} + +type varintWriter struct { + w io.Writer + lenBuf []byte + buffer []byte +} + +func (w *varintWriter) WriteMsg(msg proto.Message) (_ int, err error) { + var data []byte + if m, ok := msg.(marshaler); ok { + n, ok := getSize(m) + if !ok { + data, err = proto.Marshal(msg) + if err != nil { + return 0, err + } + } + if n >= len(w.buffer) { + w.buffer = make([]byte, n) + } + _, err = m.MarshalTo(w.buffer) + if err != nil { + return 0, err + } + data = w.buffer[:n] + } else { + data, err = proto.Marshal(msg) + if err != nil { + return 0, err + } + } + length := uint64(len(data)) + n := binary.PutUvarint(w.lenBuf, length) + n, err = w.w.Write(w.lenBuf[:n]) + if err != nil { + return n, err + } + nd, err := w.w.Write(data) + return n + nd, err +} + +func NewReader(r io.Reader, maxSize int) Reader { + return &varintReader{r: bufio.NewReader(r), maxSize: maxSize} +} + +type varintReader struct { + r *bufio.Reader + buf []byte + maxSize int + + readLen bool + len int +} + +func (r *varintReader) readLength() error { + if r.readLen { + return nil + } + length64, err := binary.ReadUvarint(r.r) + if err != nil { + return err + } + length := int(length64) + r.readLen, r.len = true, length + return nil +} + +func (r *varintReader) SkipMsg() error { + if err := r.readLength(); err != nil { + return err + } + if r.len < 0 { + return io.ErrShortBuffer + } + r.readLen = false + if _, err := r.r.Discard(r.len); err != nil { + return err + } + return nil +} + +func (r *varintReader) ReadMsg(msg proto.Message) error { + if err := r.readLength(); err != nil { + return err + } + if r.len < 0 || r.len > r.maxSize { + return io.ErrShortBuffer + } + r.readLen = false + if len(r.buf) < r.len { + r.buf = make([]byte, r.len) + } + buf := r.buf[:r.len] + if _, err := io.ReadFull(r.r, buf); err != nil { + return err + } + return proto.Unmarshal(buf, msg) +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/iter.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/iter.go new file mode 100644 index 0000000..0bd269c --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/iter.go @@ -0,0 +1,29 @@ +package query + +import ( + "gopkg.in/bblfsh/sdk.v2/uast" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" +) + +type Empty = nodes.Empty + +type IterOrder = nodes.IterOrder + +const ( + IterAny = nodes.IterAny + PreOrder = nodes.PreOrder + PostOrder = nodes.PostOrder + LevelOrder = nodes.LevelOrder + PositionOrder = LevelOrder + iota + 1 +) + +func NewIterator(root nodes.External, order IterOrder) Iterator { + if root == nil { + return Empty{} + } + switch order { + case PositionOrder: + return uast.NewPositionalIterator(root) + } + return nodes.NewIterator(root, order) +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/query.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/query.go new file mode 100644 index 0000000..295cad6 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/query.go @@ -0,0 +1,37 @@ +package query + +import ( + "gopkg.in/bblfsh/sdk.v2/uast/nodes" +) + +type Interface interface { + // Prepare parses the query and prepares it for repeated execution. 
+ Prepare(query string) (Query, error) + // Execute prepares and runs a query for a given subtree. + Execute(root nodes.External, query string) (Iterator, error) +} + +type Query interface { + // Execute runs a query for a given subtree. + Execute(root nodes.External) (Iterator, error) +} + +type Iterator = nodes.Iterator + +// AllNodes iterates over all nodes and returns them as a slice. +func AllNodes(it Iterator) []nodes.External { + var out []nodes.External + for it.Next() { + out = append(out, it.Node()) + } + return out +} + +// Count counts the nodes in the iterator. Iterator will be exhausted as a result. +func Count(it Iterator) int { + var n int + for it.Next() { + n++ + } + return n +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/xpath/query.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/xpath/query.go new file mode 100644 index 0000000..0b6275f --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/xpath/query.go @@ -0,0 +1,376 @@ +package xpath + +import ( + "fmt" + "strconv" + "strings" + + "github.com/antchfx/xpath" + + "gopkg.in/bblfsh/sdk.v2/uast" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/role" +) + +var _ xpath.NodeNavigator = &nodeNavigator{} + +// newNavigator creates a new xpath.nodeNavigator for the specified html.node. +func newNavigator(root nodes.External) *nodeNavigator { + n := &node{n: root, typ: rootNode} + return &nodeNavigator{root: n, cur: n, attri: -1} +} + +// A nodeType is the type of a node. +type nodeType uint + +const ( + // rootNode is a document object that, as the root of the document tree, + // provides access to the entire XML document. + rootNode nodeType = iota + // objectNode is an element. + objectNode + fieldNode + // valueNode is the text content of a node. + valueNode +) + +type attr struct { + key string + val string +} + +type node struct { + typ nodeType + + n nodes.External + kind nodes.Kind + obj nodes.ExternalObject + + tag [2]string + attrs []attr + sub []*node + par *node + parInd int // index in parent's sub array +} + +// nodeNavigator is for navigating JSON document. 
+type nodeNavigator struct { + root, cur *node + attri int +} + +func (a *nodeNavigator) Current() nodes.External { + return a.cur.n +} + +func (a *nodeNavigator) NodeType() xpath.NodeType { + if a.attri >= 0 { + return xpath.AttributeNode + } + switch a.cur.typ { + case valueNode: + return xpath.TextNode + case rootNode: + return xpath.RootNode + case objectNode, fieldNode: + return xpath.ElementNode + default: + panic(fmt.Errorf("unknown node type %v", a.cur.typ)) + } +} + +func (a *nodeNavigator) LocalName() string { + if a.attri >= 0 { + return a.cur.attrs[a.attri].key + } + return a.cur.tag[1] +} + +func (a *nodeNavigator) Prefix() string { + if a.attri >= 0 { + return "" + } + return a.cur.tag[0] +} + +func (a *nodeNavigator) Value() string { + if a.attri >= 0 { + return a.cur.attrs[a.attri].val + } + switch a.cur.typ { + case valueNode: + return nodes.ToString(a.cur.n.Value()) + } + return "" +} + +func (a *nodeNavigator) Copy() xpath.NodeNavigator { + n := *a + return &n +} + +func (a *nodeNavigator) MoveToRoot() { + a.cur = a.root + a.attri = -1 +} + +func (a *nodeNavigator) MoveToParent() bool { + n := a.cur.par + if n == nil { + return false + } + a.cur = n + return true +} + +func (x *nodeNavigator) MoveToNextAttribute() bool { + if x.cur.attrs == nil && x.cur.obj != nil { + x.cur.loadAttributes() + } + if x.attri+1 < len(x.cur.attrs) { + x.attri++ + return true + } + return false +} + +func (nd *node) loadAttributes() { + nd.attrs = []attr{} // indicate that attributes are loaded even if node has none + add := func(k, v string) { + nd.attrs = append(nd.attrs, attr{key: k, val: v}) + } + for _, k := range nd.obj.Keys() { + v, _ := nd.obj.ValueAt(k) + switch sub := v.(type) { + case nil: + add(k, "") + continue + case nodes.ExternalArray: + // project all array elements that are value to attributes + + isRoles := false + if k == uast.KeyRoles { + // special case for roles + k = "role" + isRoles = true + } + + sz := sub.Size() + for i := 0; i < sz; i++ { + vn := sub.ValueAt(i) + if vn == nil { + add(k, "") + continue + } + kind := vn.Kind() + if kind.In(nodes.KindsValues) { + v := vn.Value() + var av string + if isRoles && kind == nodes.KindInt { + // role id - convert to string + id, _ := v.(nodes.Int) + av = role.Role(id).String() + } else { + av = nodes.ToString(v) + } + add(k, av) + } + } + case nodes.ExternalObject: + if k != uast.KeyPos { + continue + } + // check for position nodes, expand to attributes + var pos uast.Positions + err := uast.NodeAs(sub, &pos) + if err != nil { + continue + } + for _, k := range pos.Keys() { + p := pos[k] + add(k+"-offset", strconv.FormatUint(uint64(p.Offset), 10)) + add(k+"-line", strconv.FormatUint(uint64(p.Line), 10)) + add(k+"-col", strconv.FormatUint(uint64(p.Col), 10)) + } + default: + if kind := v.Kind(); kind.In(nodes.KindsValues) { + val := v.Value() + if k == uast.KeyToken { + k = "token" + } + add(k, nodes.ToString(val)) + continue + } + } + } +} + +func (nd *node) loadChildren() { + // project fields + obj := nd.obj + keys := obj.Keys() + nd.sub = make([]*node, 0, len(keys)) + for _, k := range keys { + v, ok := obj.ValueAt(k) + if !ok { + continue + } + var vn *node + switch k { + case uast.KeyToken: + vn = toNode(v, "") + default: + vn = toNode(v, k) + } + vn.par = nd + vn.parInd = len(nd.sub) + nd.sub = append(nd.sub, vn) + } +} + +func toNode(n nodes.External, field string) *node { + if n == nil || n.Kind() == nodes.KindNil { + n = nodes.String("") // TODO + } + nd := &node{n: n, kind: n.Kind()} + + wrap := func(nd *node) 
*node { + if field == "" { + return nd + } + // wrap node into field-node + f := &node{ + n: nd.n, kind: nd.kind, + typ: fieldNode, tag: [2]string{"", field}, + sub: []*node{nd}, + } + nd.par = f + nd.parInd = 0 + return f + } + + switch nd.kind { + case nodes.KindNil: + return nil // TODO + case nodes.KindObject: + if typ := uast.TypeOf(n); typ != "" { + if i := strings.Index(typ, ":"); i >= 0 { + nd.tag = [2]string{typ[:i], typ[i+1:]} + } else { + nd.tag = [2]string{"", typ} + } + } + nd.obj, _ = nd.n.(nodes.ExternalObject) + nd.typ = objectNode + return wrap(nd) + case nodes.KindArray: + arr, _ := nd.n.(nodes.ExternalArray) + // array == sub nodes of this field + f := &node{ + n: nd.n, kind: nd.kind, + typ: fieldNode, tag: [2]string{"", field}, + } + if arr == nil { + f.sub = []*node{} + return f + } + sz := arr.Size() + f.sub = make([]*node, 0, sz) + for i := 0; i < sz; i++ { + v := arr.ValueAt(i) + s := toNode(v, "") + s.par = f + s.parInd = i + f.sub = append(f.sub, s) + } + return f + default: + // value + nd.typ = valueNode + return wrap(nd) + } +} + +func (a *nodeNavigator) MoveToChild() bool { + switch a.cur.typ { + case rootNode: + // return the same node, but without the root type + n := toNode(a.cur.n, "") + if n == nil { + return false + } + n.par = a.cur + a.cur = n + return true + case objectNode: + // node is an object, children are wrapped into a tag with the name = field + if a.cur.obj == nil { + return false + } + cur := a.cur + if cur.sub == nil { + cur.loadChildren() + } + if len(cur.sub) == 0 { + return false + } + a.cur = cur.sub[0] + return true + case fieldNode: + if len(a.cur.sub) == 0 { + return false + } + n := a.cur.sub[0] + if n == nil { + return false + } + a.cur = n + return true + } + return false +} + +func (a *nodeNavigator) isSub() bool { + return a.cur.par != nil && a.cur.parInd < len(a.cur.par.sub) +} +func (a *nodeNavigator) MoveToFirst() bool { + if a.isSub() { + par := a.cur.par + if n := par.sub[0]; n != nil { + a.cur = n + } + } + return true +} + +func (a *nodeNavigator) MoveToNext() bool { + if a.isSub() { + par := a.cur.par + if i := a.cur.parInd + 1; i < len(par.sub) { + a.cur = par.sub[i] + return true + } + } + return false +} + +func (a *nodeNavigator) MoveToPrevious() bool { + if a.isSub() { + par := a.cur.par + if i := a.cur.parInd - 1; i >= 0 && i < len(par.sub) { + a.cur = par.sub[i] + return true + } + } + return false +} + +func (a *nodeNavigator) MoveTo(other xpath.NodeNavigator) bool { + node, ok := other.(*nodeNavigator) + if !ok || node.root != a.root { + return false + } + a.cur = node.cur + a.attri = node.attri + return true +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/xpath/xpath.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/xpath/xpath.go new file mode 100644 index 0000000..986bb01 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/query/xpath/xpath.go @@ -0,0 +1,112 @@ +package xpath + +import ( + "fmt" + + "github.com/antchfx/xpath" + + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/query" +) + +func New() query.Interface { + return &index{} +} + +type index struct{} + +func (t *index) newNavigator(n nodes.External) xpath.NodeNavigator { + return newNavigator(n) +} + +func (t *index) Prepare(query string) (query.Query, error) { + exp, err := xpath.Compile(query) + if err != nil { + return nil, err + } + return &xQuery{idx: t, exp: exp}, nil +} + +func (t *index) Execute(root nodes.External, query string) (query.Iterator, error) { + q, err := t.Prepare(query) + if err 
!= nil { + return nil, err + } + return q.Execute(root) +} + +type xQuery struct { + idx *index + exp *xpath.Expr +} + +func (q *xQuery) Execute(root nodes.External) (query.Iterator, error) { + nav := q.idx.newNavigator(root) + val := q.exp.Evaluate(nav) + if it, ok := val.(*xpath.NodeIterator); ok { + return &iterator{it: it}, nil + } + var v nodes.Value + switch val := val.(type) { + case bool: + v = nodes.Bool(val) + case float64: + if float64(int64(val)) == val { + v = nodes.Int(val) + } else { + v = nodes.Float(val) + } + case int: + v = nodes.Int(val) + case uint: + v = nodes.Uint(val) + case string: + v = nodes.String(val) + default: + return nil, fmt.Errorf("unsupported type: %T", val) + } + return &valIterator{val: v}, nil +} + +type valIterator struct { + state int + val nodes.Value +} + +func (it *valIterator) Next() bool { + switch it.state { + case 0: + it.state++ + return true + case 1: + it.state++ + } + return false +} + +func (it *valIterator) Node() nodes.External { + if it.state == 1 { + return it.val + } + return nil +} + +type iterator struct { + it *xpath.NodeIterator +} + +func (it *iterator) Next() bool { + return it.it.MoveNext() +} + +func (it *iterator) Node() nodes.External { + c := it.it.Current() + if c == nil { + return nil + } + nav := c.(*nodeNavigator) + if nav.cur == nil { + return nil + } + return nav.cur.n +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/generated.pb.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/generated.pb.go new file mode 100644 index 0000000..92bcbed --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/generated.pb.go @@ -0,0 +1,409 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gopkg.in/bblfsh/sdk.v2/uast/role/generated.proto + +/* + Package role is a generated protocol buffer package. + + It is generated from these files: + gopkg.in/bblfsh/sdk.v2/uast/role/generated.proto + + It has these top-level messages: +*/ +package role + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Role is the main UAST annotation. It indicates that a node in an AST can +// be interpreted as acting with certain language-independent role. 
+// +// go:generate stringer -type=Role +var Role_name = map[int32]string{ + 0: "INVALID", + 1: "IDENTIFIER", + 2: "QUALIFIED", + 3: "OPERATOR", + 4: "BINARY", + 5: "UNARY", + 6: "LEFT", + 7: "RIGHT", + 8: "INFIX", + 9: "POSTFIX", + 10: "BITWISE", + 11: "BOOLEAN", + 12: "UNSIGNED", + 13: "LEFT_SHIFT", + 14: "RIGHT_SHIFT", + 15: "OR", + 16: "XOR", + 17: "AND", + 18: "EXPRESSION", + 19: "STATEMENT", + 20: "EQUAL", + 21: "NOT", + 22: "LESS_THAN", + 23: "LESS_THAN_OR_EQUAL", + 24: "GREATER_THAN", + 25: "GREATER_THAN_OR_EQUAL", + 26: "IDENTICAL", + 27: "CONTAINS", + 28: "INCREMENT", + 29: "DECREMENT", + 30: "NEGATIVE", + 31: "POSITIVE", + 32: "DEREFERENCE", + 33: "TAKE_ADDRESS", + 34: "FILE", + 35: "ADD", + 36: "SUBSTRACT", + 37: "MULTIPLY", + 38: "DIVIDE", + 39: "MODULO", + 40: "PACKAGE", + 41: "DECLARATION", + 42: "IMPORT", + 43: "PATHNAME", + 44: "ALIAS", + 45: "FUNCTION", + 46: "BODY", + 47: "NAME", + 48: "RECEIVER", + 49: "ARGUMENT", + 50: "VALUE", + 51: "ARGS_LIST", + 52: "BASE", + 53: "IMPLEMENTS", + 54: "INSTANCE", + 55: "SUBTYPE", + 56: "SUBPACKAGE", + 57: "MODULE", + 58: "FRIEND", + 59: "WORLD", + 60: "IF", + 61: "CONDITION", + 62: "THEN", + 63: "ELSE", + 64: "SWITCH", + 65: "CASE", + 66: "DEFAULT", + 67: "FOR", + 68: "INITIALIZATION", + 69: "UPDATE", + 70: "ITERATOR", + 71: "WHILE", + 72: "DO_WHILE", + 73: "BREAK", + 74: "CONTINUE", + 75: "GOTO", + 76: "BLOCK", + 77: "SCOPE", + 78: "RETURN", + 79: "TRY", + 80: "CATCH", + 81: "FINALLY", + 82: "THROW", + 83: "ASSERT", + 84: "CALL", + 85: "CALLEE", + 86: "POSITIONAL", + 87: "NOOP", + 88: "LITERAL", + 89: "BYTE", + 90: "BYTE_STRING", + 91: "CHARACTER", + 92: "LIST", + 93: "MAP", + 94: "NULL", + 95: "NUMBER", + 96: "REGEXP", + 97: "SET", + 98: "STRING", + 99: "TUPLE", + 100: "TYPE", + 101: "ENTRY", + 102: "KEY", + 103: "PRIMITIVE", + 104: "ASSIGNMENT", + 105: "THIS", + 106: "COMMENT", + 107: "DOCUMENTATION", + 108: "WHITESPACE", + 109: "INCOMPLETE", + 110: "UNANNOTATED", + 111: "VISIBILITY", + 112: "ANNOTATION", + 113: "ANONYMOUS", + 114: "ENUMERATION", + 115: "ARITHMETIC", + 116: "RELATIONAL", + 117: "VARIABLE", +} +var Role_value = map[string]int32{ + "INVALID": 0, + "IDENTIFIER": 1, + "QUALIFIED": 2, + "OPERATOR": 3, + "BINARY": 4, + "UNARY": 5, + "LEFT": 6, + "RIGHT": 7, + "INFIX": 8, + "POSTFIX": 9, + "BITWISE": 10, + "BOOLEAN": 11, + "UNSIGNED": 12, + "LEFT_SHIFT": 13, + "RIGHT_SHIFT": 14, + "OR": 15, + "XOR": 16, + "AND": 17, + "EXPRESSION": 18, + "STATEMENT": 19, + "EQUAL": 20, + "NOT": 21, + "LESS_THAN": 22, + "LESS_THAN_OR_EQUAL": 23, + "GREATER_THAN": 24, + "GREATER_THAN_OR_EQUAL": 25, + "IDENTICAL": 26, + "CONTAINS": 27, + "INCREMENT": 28, + "DECREMENT": 29, + "NEGATIVE": 30, + "POSITIVE": 31, + "DEREFERENCE": 32, + "TAKE_ADDRESS": 33, + "FILE": 34, + "ADD": 35, + "SUBSTRACT": 36, + "MULTIPLY": 37, + "DIVIDE": 38, + "MODULO": 39, + "PACKAGE": 40, + "DECLARATION": 41, + "IMPORT": 42, + "PATHNAME": 43, + "ALIAS": 44, + "FUNCTION": 45, + "BODY": 46, + "NAME": 47, + "RECEIVER": 48, + "ARGUMENT": 49, + "VALUE": 50, + "ARGS_LIST": 51, + "BASE": 52, + "IMPLEMENTS": 53, + "INSTANCE": 54, + "SUBTYPE": 55, + "SUBPACKAGE": 56, + "MODULE": 57, + "FRIEND": 58, + "WORLD": 59, + "IF": 60, + "CONDITION": 61, + "THEN": 62, + "ELSE": 63, + "SWITCH": 64, + "CASE": 65, + "DEFAULT": 66, + "FOR": 67, + "INITIALIZATION": 68, + "UPDATE": 69, + "ITERATOR": 70, + "WHILE": 71, + "DO_WHILE": 72, + "BREAK": 73, + "CONTINUE": 74, + "GOTO": 75, + "BLOCK": 76, + "SCOPE": 77, + "RETURN": 78, + "TRY": 79, + "CATCH": 80, + "FINALLY": 81, + "THROW": 82, + 
"ASSERT": 83, + "CALL": 84, + "CALLEE": 85, + "POSITIONAL": 86, + "NOOP": 87, + "LITERAL": 88, + "BYTE": 89, + "BYTE_STRING": 90, + "CHARACTER": 91, + "LIST": 92, + "MAP": 93, + "NULL": 94, + "NUMBER": 95, + "REGEXP": 96, + "SET": 97, + "STRING": 98, + "TUPLE": 99, + "TYPE": 100, + "ENTRY": 101, + "KEY": 102, + "PRIMITIVE": 103, + "ASSIGNMENT": 104, + "THIS": 105, + "COMMENT": 106, + "DOCUMENTATION": 107, + "WHITESPACE": 108, + "INCOMPLETE": 109, + "UNANNOTATED": 110, + "VISIBILITY": 111, + "ANNOTATION": 112, + "ANONYMOUS": 113, + "ENUMERATION": 114, + "ARITHMETIC": 115, + "RELATIONAL": 116, + "VARIABLE": 117, +} + +func (Role) EnumDescriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func init() { + proto.RegisterEnum("gopkg.in.bblfsh.sdk.v1.uast.role.Role", Role_name, Role_value) +} + +func init() { + proto.RegisterFile("gopkg.in/bblfsh/sdk.v2/uast/role/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1939 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x57, 0xe9, 0x92, 0xdb, 0xc6, + 0x11, 0x5e, 0x49, 0xab, 0x3d, 0xa0, 0xab, 0x4d, 0xcb, 0x8a, 0x0c, 0xcb, 0x34, 0x9d, 0x38, 0x4e, + 0x62, 0xc7, 0xbb, 0x56, 0x9c, 0xfb, 0x1e, 0x02, 0x43, 0x72, 0xb2, 0xe0, 0x00, 0x02, 0x06, 0xdc, + 0xa5, 0x72, 0x6c, 0x40, 0x72, 0x96, 0x9c, 0x2c, 0x08, 0xd0, 0x38, 0x24, 0x6d, 0x9e, 0x20, 0xc5, + 0x27, 0x48, 0xa5, 0x8a, 0x55, 0xa9, 0x8a, 0x7f, 0xe4, 0x31, 0xf2, 0x33, 0x3f, 0xf3, 0x08, 0x29, + 0xf9, 0x45, 0x5c, 0x33, 0x0d, 0xca, 0xfa, 0x47, 0xf6, 0x35, 0xdd, 0xfd, 0xf5, 0xd7, 0x33, 0xb0, + 0x3e, 0x9d, 0xe7, 0xab, 0xcb, 0xf9, 0x91, 0xca, 0x8e, 0x27, 0x93, 0xf4, 0xa2, 0x5c, 0x1c, 0x97, + 0xb3, 0xcb, 0xa3, 0x67, 0x8f, 0x8f, 0xeb, 0xa4, 0xac, 0x8e, 0x8b, 0x3c, 0x95, 0xc7, 0x73, 0x99, + 0xc9, 0x22, 0xa9, 0xe4, 0xec, 0x68, 0x55, 0xe4, 0x55, 0xde, 0xea, 0x6c, 0x3d, 0x8e, 0xd0, 0xe3, + 0x08, 0x3d, 0x8e, 0xb4, 0xc7, 0x91, 0xf6, 0xb0, 0x3f, 0x99, 0xab, 0x6a, 0x51, 0x4f, 0x8e, 0xa6, + 0xf9, 0xf2, 0x78, 0x9e, 0xcf, 0xf3, 0x63, 0xe3, 0x38, 0xa9, 0x2f, 0xcc, 0x3f, 0xf3, 0xc7, 0xfc, + 0xc2, 0x80, 0x1f, 0xfd, 0xe3, 0xa1, 0xb5, 0x1b, 0xe6, 0xa9, 0x6c, 0x3d, 0xb4, 0xf6, 0x19, 0x1f, + 0x11, 0x8f, 0xb9, 0xb0, 0x63, 0xdf, 0x5a, 0x6f, 0x3a, 0xfb, 0x2c, 0x7b, 0x96, 0xa4, 0x6a, 0xd6, + 0x6a, 0x5b, 0x16, 0x73, 0x29, 0x17, 0xac, 0xc7, 0x68, 0x08, 0xd7, 0xec, 0xbb, 0xeb, 0x4d, 0xc7, + 0x62, 0x33, 0x99, 0x55, 0xea, 0x42, 0xc9, 0xa2, 0xf5, 0xc8, 0x3a, 0x7c, 0x12, 0x13, 0x4f, 0xab, + 0x5d, 0xb8, 0x6e, 0xdf, 0x59, 0x6f, 0x3a, 0x87, 0x4f, 0xea, 0x24, 0xd5, 0xda, 0x59, 0xcb, 0xb6, + 0x0e, 0xfc, 0x80, 0x86, 0x44, 0xf8, 0x21, 0xdc, 0xb0, 0x6f, 0xaf, 0x37, 0x9d, 0x03, 0x7f, 0xa5, + 0x6b, 0xca, 0x8b, 0xd6, 0x03, 0x6b, 0xaf, 0xcb, 0x38, 0x09, 0xc7, 0xb0, 0x6b, 0x5b, 0xeb, 0x4d, + 0x67, 0xaf, 0xab, 0xb2, 0xa4, 0xb8, 0x6a, 0xdd, 0xb7, 0x6e, 0xc6, 0x46, 0x7c, 0xd3, 0x3e, 0x5c, + 0x6f, 0x3a, 0x37, 0x63, 0x23, 0x6d, 0x59, 0xbb, 0x1e, 0xed, 0x09, 0xd8, 0xb3, 0x0f, 0xd6, 0x9b, + 0xce, 0xae, 0x27, 0x2f, 0x2a, 0x6d, 0x19, 0xb2, 0xfe, 0x40, 0xc0, 0x3e, 0x5a, 0x86, 0x6a, 0xbe, + 0x30, 0x52, 0xc6, 0x7b, 0xec, 0x0c, 0x0e, 0x50, 0xca, 0xb2, 0x0b, 0xf5, 0x42, 0x57, 0x18, 0xf8, + 0x91, 0xd0, 0xf2, 0x43, 0xac, 0x30, 0xc8, 0xcb, 0xaa, 0xd1, 0x74, 0x99, 0x38, 0x65, 0x11, 0x05, + 0x0b, 0x35, 0x5d, 0x55, 0x3d, 0x57, 0xa5, 0xe9, 0x4a, 0xd7, 0xf7, 0x3d, 0x4a, 0x38, 0xdc, 0x6a, + 0x34, 0x79, 0x9e, 0xca, 0x24, 0xd3, 0x75, 0xc5, 0x3c, 0x62, 0x7d, 0x4e, 0x5d, 0xb8, 0x8d, 0x75, + 0xc5, 0x59, 0xa9, 0xe6, 0x99, 0x9c, 0xb5, 0xde, 0xb5, 0x2c, 0x9d, 0xe9, 
0x79, 0x34, 0x60, 0x3d, + 0x01, 0x77, 0xb0, 0x25, 0x3a, 0xdf, 0x68, 0xa1, 0x2e, 0xaa, 0xd6, 0x7b, 0xd6, 0x2d, 0x93, 0x74, + 0xa3, 0xbf, 0x8b, 0x1d, 0x35, 0xa9, 0xa3, 0xc1, 0x5d, 0xeb, 0xba, 0x1f, 0xc2, 0x3d, 0x7b, 0x6f, + 0xbd, 0xe9, 0x5c, 0xf7, 0x8b, 0x16, 0x58, 0x37, 0xce, 0xfc, 0x10, 0xc0, 0xde, 0x5f, 0x6f, 0x3a, + 0x37, 0xce, 0x72, 0x23, 0x21, 0xdc, 0x85, 0x37, 0x50, 0x42, 0x32, 0x83, 0x12, 0x3d, 0x0b, 0x42, + 0x1a, 0x45, 0xcc, 0xe7, 0xd0, 0xc2, 0x98, 0xf4, 0xc5, 0xaa, 0x90, 0x65, 0xa9, 0xf2, 0x4c, 0xa3, + 0x14, 0x09, 0x22, 0xe8, 0x90, 0x72, 0x01, 0x6f, 0x62, 0x4a, 0x51, 0x95, 0x54, 0x72, 0x29, 0x33, + 0xd3, 0x31, 0xaa, 0x41, 0x84, 0xfb, 0xd8, 0x31, 0xfa, 0x79, 0x9d, 0xa4, 0xfa, 0x14, 0xee, 0x0b, + 0x78, 0x0b, 0x4f, 0xe1, 0x79, 0xd5, 0x7a, 0xc7, 0x3a, 0xf4, 0x68, 0x14, 0x9d, 0x8b, 0x01, 0xe1, + 0xf0, 0x00, 0xcb, 0xf6, 0x64, 0x59, 0x8a, 0x45, 0x92, 0xb5, 0x3e, 0xb6, 0x5a, 0xaf, 0x94, 0xe7, + 0x7e, 0x78, 0x8e, 0x11, 0xbf, 0x61, 0xbf, 0xb9, 0xde, 0x74, 0xee, 0x6d, 0xad, 0xfc, 0x02, 0x63, + 0xbf, 0x6f, 0xdd, 0xee, 0x87, 0x94, 0x08, 0x1a, 0x62, 0xb0, 0x87, 0xf6, 0xbd, 0xf5, 0xa6, 0x73, + 0xab, 0x5f, 0xc8, 0xa4, 0x92, 0x85, 0x89, 0xf7, 0xd8, 0x7a, 0xeb, 0x75, 0x93, 0xaf, 0x43, 0xbe, + 0x6d, 0x3f, 0x58, 0x6f, 0x3a, 0xad, 0xd7, 0x6c, 0xb7, 0x51, 0x1f, 0x59, 0x87, 0x38, 0xab, 0x0e, + 0xf1, 0xc0, 0xc6, 0x2a, 0x71, 0x54, 0xa7, 0x49, 0xaa, 0x31, 0x73, 0x7c, 0x2e, 0x08, 0xe3, 0x11, + 0xbc, 0x83, 0xc9, 0x3b, 0x79, 0x56, 0x25, 0x2a, 0x2b, 0x8d, 0x27, 0x77, 0x42, 0xec, 0xcf, 0xa3, + 0xc6, 0x33, 0x9b, 0x16, 0xd8, 0x9f, 0x47, 0xd6, 0xa1, 0x4b, 0xb7, 0xda, 0x77, 0x51, 0xeb, 0xca, + 0xad, 0xd6, 0xb6, 0x0e, 0x38, 0xed, 0x13, 0xc1, 0x46, 0x14, 0xda, 0x18, 0x97, 0xcb, 0x79, 0x52, + 0xa9, 0x67, 0x52, 0xeb, 0x02, 0x3f, 0x62, 0x46, 0xf7, 0x1e, 0xea, 0x82, 0xbc, 0x54, 0x46, 0xd7, + 0xb1, 0x6e, 0xb9, 0x34, 0xa4, 0x3d, 0x1a, 0x52, 0xee, 0x50, 0xe8, 0x60, 0x0b, 0x5c, 0x59, 0xc8, + 0x0b, 0x59, 0xc8, 0x6c, 0x2a, 0x75, 0x97, 0x04, 0x39, 0xa1, 0xe7, 0xc4, 0x75, 0x35, 0xb4, 0xf0, + 0x3e, 0x9a, 0x88, 0xe4, 0x52, 0x92, 0xd9, 0x4c, 0x63, 0xab, 0x69, 0xd1, 0x63, 0x1e, 0x85, 0x6f, + 0x22, 0x2d, 0x7a, 0x2a, 0x95, 0x66, 0x3c, 0x5c, 0x17, 0xbe, 0xd5, 0x8c, 0xc7, 0x6c, 0x66, 0xe0, + 0x8f, 0xbb, 0x91, 0x08, 0x89, 0x23, 0xe0, 0x83, 0x06, 0xfe, 0x7a, 0x52, 0x56, 0x45, 0x32, 0x35, + 0x05, 0x0c, 0x63, 0x4f, 0xb0, 0xc0, 0x1b, 0xc3, 0xb7, 0x31, 0xc9, 0x61, 0x9d, 0x56, 0x6a, 0x95, + 0x5e, 0x69, 0x92, 0xba, 0x6c, 0xc4, 0x5c, 0x0a, 0x1f, 0x22, 0x49, 0x5d, 0xf5, 0x4c, 0xcd, 0xa4, + 0x96, 0x0f, 0x7d, 0x37, 0xf6, 0x7c, 0xf8, 0x0e, 0xca, 0x87, 0xf9, 0xac, 0x4e, 0x73, 0x43, 0x33, + 0xe2, 0x9c, 0x90, 0x3e, 0x85, 0xef, 0x36, 0x34, 0x4b, 0xa6, 0x97, 0xc9, 0xbc, 0x29, 0xd7, 0xf1, + 0x48, 0x48, 0x84, 0x9e, 0xd1, 0xef, 0x6d, 0xcb, 0x9d, 0xa6, 0x49, 0x91, 0x54, 0x7a, 0x48, 0x1f, + 0x58, 0x7b, 0x6c, 0x18, 0xf8, 0xa1, 0x80, 0x8f, 0x30, 0x26, 0x5b, 0xae, 0xf2, 0xc2, 0xe4, 0x17, + 0x10, 0x31, 0xe0, 0x64, 0x48, 0xe1, 0xe3, 0xa6, 0x89, 0x49, 0xb5, 0xc8, 0x92, 0xa5, 0xd4, 0xa3, + 0x4b, 0x3c, 0x46, 0x22, 0xf8, 0x3e, 0x8e, 0x2e, 0x49, 0x55, 0x52, 0x6a, 0x8f, 0x5e, 0xcc, 0x1d, + 0x73, 0xd0, 0x27, 0xe8, 0xd1, 0xab, 0xb3, 0xa9, 0x39, 0xa5, 0x65, 0xed, 0x76, 0x7d, 0x77, 0x0c, + 0x47, 0xd8, 0xb1, 0x6e, 0x3e, 0x33, 0xcb, 0xc5, 0x44, 0x3f, 0x46, 0x19, 0xd7, 0x91, 0x6d, 0xeb, + 0x20, 0xa4, 0x0e, 0x65, 0x23, 0x1a, 0xc2, 0xa7, 0x18, 0x23, 0x94, 0x53, 0xa9, 0x9e, 0xc9, 0x42, + 0xeb, 0x48, 0xd8, 0x8f, 0xcd, 0x3c, 0x3c, 0x46, 0x1d, 0x29, 0xe6, 0xf5, 0x96, 0x4c, 0x23, 0xe2, + 0xc5, 0x14, 0x7e, 0x80, 0x19, 0x8d, 0x92, 0xb4, 0x96, 0x9a, 0x3a, 0x24, 0xec, 0x47, 0xe7, 0x1e, + 
0x8b, 0x04, 0x7c, 0xf6, 0xca, 0xa5, 0xf4, 0x54, 0x59, 0x99, 0x94, 0x48, 0x44, 0xe1, 0x87, 0x4d, + 0x4a, 0x49, 0x29, 0xcd, 0xde, 0x1d, 0x06, 0x9e, 0x99, 0xb9, 0x08, 0x7e, 0xd4, 0xec, 0xdd, 0xe5, + 0x2a, 0x35, 0x43, 0x67, 0x4a, 0x64, 0x3c, 0x12, 0x44, 0x8f, 0xce, 0x8f, 0x31, 0x1e, 0xcb, 0xca, + 0x2a, 0xd1, 0x73, 0xf3, 0xd0, 0xda, 0x8f, 0xe2, 0xae, 0x18, 0x07, 0x14, 0x7e, 0x82, 0x20, 0x44, + 0xf5, 0xa4, 0xba, 0x5a, 0x99, 0xa8, 0x51, 0xdc, 0xdd, 0x22, 0xf4, 0x53, 0x8c, 0x1a, 0xd5, 0x93, + 0x55, 0x03, 0xd2, 0x16, 0x56, 0x0a, 0x3f, 0x7b, 0x0d, 0x56, 0x23, 0xef, 0x85, 0x8c, 0x72, 0x17, + 0x7e, 0x8e, 0xf2, 0x5e, 0xa1, 0x64, 0x36, 0xd3, 0xc5, 0x9e, 0xfa, 0xa1, 0xe7, 0xc2, 0x2f, 0xb0, + 0xd8, 0xd3, 0xbc, 0x48, 0x67, 0x7a, 0x83, 0xb1, 0x1e, 0xfc, 0x12, 0x37, 0x18, 0xbb, 0xd0, 0xe3, + 0xe7, 0xf8, 0xdc, 0x65, 0x06, 0x8f, 0x5f, 0xe1, 0xf8, 0x39, 0x79, 0x36, 0x53, 0x5b, 0x40, 0xc4, + 0x80, 0x72, 0xf8, 0x35, 0x56, 0x2f, 0x16, 0xd2, 0xc8, 0xa8, 0x17, 0x51, 0xf8, 0x0d, 0xca, 0x68, + 0x5a, 0x9a, 0x1c, 0xa2, 0x53, 0x26, 0x9c, 0x01, 0xfc, 0x16, 0x73, 0x88, 0x9e, 0xab, 0x6a, 0xba, + 0xd0, 0xb6, 0x8e, 0xee, 0x1e, 0x41, 0x5b, 0x27, 0xc1, 0xcd, 0xed, 0xd2, 0x1e, 0x89, 0x3d, 0x01, + 0x5d, 0xec, 0x80, 0x2b, 0x2f, 0x92, 0x3a, 0xad, 0x34, 0x39, 0x7a, 0x7e, 0x08, 0x0e, 0x92, 0xa3, + 0x97, 0x17, 0xad, 0x0f, 0xad, 0xbb, 0x8c, 0x33, 0xc1, 0x88, 0xc7, 0x9e, 0xe2, 0x6c, 0xba, 0x76, + 0x6b, 0xbd, 0xe9, 0xdc, 0x65, 0x99, 0xaa, 0x54, 0x92, 0xaa, 0xbf, 0xbe, 0x1a, 0xcf, 0x38, 0x70, + 0x89, 0xa0, 0x40, 0xf1, 0xfc, 0x78, 0x35, 0x4b, 0x2a, 0x33, 0x28, 0x4c, 0x34, 0x77, 0x5c, 0xaf, + 0x41, 0xa2, 0x6a, 0xee, 0x38, 0xdd, 0x9f, 0x81, 0xe6, 0x67, 0xbf, 0xe9, 0xcf, 0x42, 0x13, 0xf4, + 0x6d, 0xeb, 0xc0, 0xf5, 0xcf, 0x51, 0x31, 0x68, 0xd2, 0xcb, 0x51, 0x75, 0xdf, 0xba, 0xd9, 0x0d, + 0x29, 0x39, 0x01, 0x86, 0x0e, 0xdd, 0x42, 0x26, 0x97, 0xdb, 0xd5, 0xc5, 0x78, 0x4c, 0xe1, 0x77, + 0x5f, 0xaf, 0x2e, 0x95, 0xd5, 0x52, 0x97, 0xdf, 0xf7, 0x85, 0x0f, 0x27, 0x58, 0x7e, 0x5f, 0x3f, + 0x14, 0x74, 0x14, 0xcf, 0x77, 0x4e, 0xc0, 0x6b, 0xa2, 0xa4, 0xf9, 0xf4, 0x52, 0x4b, 0x23, 0xc7, + 0x0f, 0x28, 0x0c, 0x51, 0x1a, 0x4d, 0xf3, 0x95, 0x69, 0x6b, 0x48, 0x45, 0x1c, 0x72, 0xe0, 0x58, + 0x56, 0x28, 0xab, 0xba, 0xc8, 0x74, 0xa3, 0x44, 0x38, 0x06, 0x1f, 0x1b, 0x25, 0xf0, 0x62, 0x76, + 0x88, 0xee, 0x7f, 0x80, 0xfe, 0x4e, 0xa2, 0xdb, 0xff, 0xd0, 0xda, 0xef, 0x31, 0x4e, 0x3c, 0x6f, + 0x0c, 0x4f, 0xb0, 0x96, 0x9e, 0xca, 0x92, 0x34, 0x35, 0xf6, 0x62, 0x10, 0xfa, 0xa7, 0x10, 0xa2, + 0xbd, 0x58, 0x14, 0xf9, 0x73, 0x7d, 0x1e, 0x89, 0x22, 0x1a, 0x0a, 0x88, 0xf0, 0x3c, 0x52, 0x96, + 0xb2, 0xa8, 0x10, 0x46, 0xcf, 0x03, 0xb1, 0x85, 0x31, 0x4d, 0xb5, 0xad, 0x96, 0x51, 0x0a, 0x31, + 0xda, 0x6a, 0xa9, 0x34, 0x63, 0x8c, 0x6b, 0xd5, 0xe7, 0xc4, 0x83, 0x11, 0x8e, 0x31, 0x2e, 0xd6, + 0x3c, 0x4b, 0x52, 0xc3, 0x67, 0xdf, 0x0f, 0xe0, 0xb4, 0xe1, 0x73, 0x9e, 0xaf, 0x74, 0x9e, 0x9e, + 0xc1, 0xc9, 0x83, 0x33, 0xcc, 0xd3, 0x53, 0x1a, 0x26, 0x63, 0xdd, 0x1d, 0x0b, 0x0a, 0xe3, 0x86, + 0x7e, 0x57, 0x95, 0xd4, 0xb7, 0xb4, 0x96, 0x9d, 0x47, 0x22, 0x64, 0xbc, 0x0f, 0x4f, 0xf1, 0x08, + 0xad, 0x8a, 0xaa, 0x42, 0x65, 0x73, 0x33, 0xd3, 0x03, 0xa2, 0x17, 0x2a, 0x0d, 0xe1, 0xf7, 0xcd, + 0x4c, 0x2f, 0x12, 0xbd, 0x50, 0x65, 0x61, 0x5e, 0x2b, 0x9a, 0xe9, 0x7f, 0x68, 0x5e, 0x2b, 0x9a, + 0xe5, 0x60, 0xdd, 0x18, 0x92, 0x00, 0xfe, 0x88, 0x0d, 0x1d, 0x26, 0x2b, 0x93, 0x66, 0xec, 0x79, + 0xf0, 0xa7, 0x26, 0xcd, 0x1a, 0x4b, 0xe6, 0xf1, 0xb0, 0x4b, 0x43, 0x38, 0xc7, 0x92, 0x79, 0xbd, + 0x9c, 0xc8, 0x02, 0x61, 0xea, 0xd3, 0xb3, 0x00, 0xfe, 0xbc, 0x85, 0x69, 0x2e, 0x5f, 0xac, 0x74, + 0xd4, 0x88, 0x0a, 0x48, 
0x30, 0x6a, 0x24, 0x2b, 0xc3, 0x13, 0xcc, 0x7a, 0xd2, 0xf0, 0x04, 0x33, + 0xd6, 0x70, 0xc4, 0x81, 0x47, 0x61, 0xda, 0xc0, 0x51, 0xaf, 0x52, 0x33, 0x3e, 0x66, 0x51, 0xcc, + 0x1a, 0xf6, 0xe9, 0x2d, 0xa1, 0xdf, 0x03, 0x5c, 0x83, 0x2f, 0x9b, 0xf7, 0x40, 0x56, 0x15, 0x57, + 0xfa, 0xa4, 0x13, 0x3a, 0x86, 0x0b, 0x3c, 0xe9, 0x44, 0x5e, 0xe9, 0x1e, 0x04, 0x21, 0x1b, 0xe2, + 0xf5, 0x36, 0xc7, 0x1e, 0x04, 0x85, 0x5a, 0xe2, 0xfd, 0xd6, 0xb6, 0x2c, 0x12, 0xe9, 0x37, 0x92, + 0x59, 0x93, 0x0b, 0xec, 0x20, 0x29, 0xf5, 0x2b, 0xc9, 0x2c, 0x4a, 0xc3, 0x7b, 0x16, 0x81, 0xda, + 0xf2, 0x5e, 0x95, 0x1a, 0x24, 0xc7, 0x1f, 0x1a, 0x87, 0xbf, 0x20, 0x48, 0x4e, 0xbe, 0x34, 0xd6, + 0x1f, 0x58, 0x77, 0x5c, 0xdf, 0x31, 0x2b, 0x17, 0x49, 0x7a, 0x69, 0xbf, 0xb1, 0xde, 0x74, 0xee, + 0xb8, 0xf9, 0xd4, 0xec, 0x5d, 0xe4, 0x68, 0xdb, 0xb2, 0x4e, 0x07, 0x4c, 0xd0, 0x28, 0x20, 0x0e, + 0x85, 0x14, 0xcf, 0x3c, 0x5d, 0xa8, 0x4a, 0x96, 0xab, 0x64, 0x8a, 0x5b, 0x95, 0x3b, 0xbe, 0x5e, + 0xac, 0x82, 0xc2, 0xb2, 0xd9, 0xaa, 0xd9, 0x34, 0xd7, 0x8b, 0xb5, 0x32, 0x97, 0x54, 0xcc, 0x09, + 0xe7, 0xbe, 0x7e, 0x2c, 0xb9, 0x90, 0xe1, 0x25, 0x15, 0x67, 0x49, 0x96, 0xe5, 0xfa, 0xb9, 0x64, + 0x5e, 0x5a, 0x23, 0x16, 0xb1, 0x2e, 0xf3, 0x98, 0x18, 0x43, 0x8e, 0x11, 0x46, 0xaa, 0x54, 0x13, + 0x95, 0xaa, 0xea, 0xca, 0x54, 0x8d, 0xfe, 0x3a, 0xc9, 0x55, 0x53, 0x35, 0xba, 0x37, 0x2f, 0x31, + 0xc2, 0x7d, 0x3e, 0x1e, 0xfa, 0x71, 0x04, 0x9f, 0x63, 0xcf, 0x48, 0x96, 0x67, 0x57, 0xcb, 0xbc, + 0x2e, 0xf5, 0xf9, 0x94, 0xc7, 0x43, 0xda, 0x5c, 0x92, 0x05, 0x9e, 0x4f, 0xb3, 0x7a, 0x29, 0x8b, + 0x57, 0x15, 0x92, 0x90, 0x89, 0xc1, 0x90, 0x0a, 0xe6, 0x40, 0xd9, 0xc4, 0x2f, 0x54, 0xb5, 0x58, + 0xca, 0x4a, 0x4d, 0xb5, 0x3e, 0xa4, 0x1e, 0x69, 0xa8, 0x51, 0x35, 0xaf, 0x4b, 0x99, 0x26, 0x0d, + 0x35, 0x6c, 0xeb, 0x60, 0x44, 0x42, 0x46, 0xba, 0x1e, 0x85, 0x1a, 0x57, 0xc9, 0x28, 0x29, 0x54, + 0x32, 0x49, 0xa5, 0x7d, 0xfb, 0x6f, 0xff, 0x6a, 0xef, 0xfc, 0xfb, 0x8b, 0xf6, 0xce, 0x7f, 0xbe, + 0x68, 0xef, 0x74, 0xed, 0xff, 0xbe, 0x6c, 0x5f, 0xfb, 0xdf, 0xcb, 0xf6, 0xb5, 0xff, 0xbf, 0x6c, + 0xef, 0xfc, 0xfd, 0xcb, 0xf6, 0xce, 0x3f, 0xbf, 0x6c, 0x5f, 0x7b, 0xba, 0xab, 0xbf, 0x33, 0x26, + 0x7b, 0xe6, 0xfb, 0xe1, 0xb3, 0xaf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd5, 0xc0, 0xe4, 0xc7, 0xc4, + 0x0c, 0x00, 0x00, +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/role.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/role.go new file mode 100644 index 0000000..5c30a37 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/role.go @@ -0,0 +1,443 @@ +package role + +//go:generate proteus -f $GOPATH/src -p gopkg.in/bblfsh/sdk.v2/uast/role + +// Role is the main UAST annotation. It indicates that a node in an AST can +// be interpreted as acting with certain language-independent role. +// +//proteus:generate +//go:generate stringer -type=Role +type Role int16 + +// Valid checks if this role was defined. As an exception, it returns false for an Invalid role. +func (i Role) Valid() bool { + return i > 0 && i < Role(len(_Role_index)-1) +} + +// Roles is an ordered list of roles. +type Roles []Role + +var lookupRole = make(map[string]Role) + +func init() { + for i := 0; i < len(_Role_index)-1; i++ { + s, e := _Role_index[i], _Role_index[i+1] + lookupRole[_Role_name[s:e]] = Role(i) + } +} + +// FromString converts a string representation of the Role to its numeric value. +func FromString(s string) Role { + r, ok := lookupRole[s] + if !ok { + return Invalid + } + return r +} + +const ( + // Invalid Role is assigned as a zero value since protobuf enum definition must start at 0. 
+	Invalid Role = iota
+
+	// Identifier is any form of identifier, used for variable names, functions, packages, etc.
+	Identifier
+
+	// Qualified is a kind of property identifiers may have, when it's composed
+	// of multiple simple identifiers.
+	Qualified
+
+	// Operator is any form of operator.
+	Operator
+
+	// Binary is any form of binary operator, in contrast with unary operators.
+	Binary
+
+	// Unary is any form of unary operator, in contrast with binary operators.
+	Unary
+
+	// Left is a left hand side in a binary expression.
+	Left
+
+	// Right is a right hand side of a binary expression.
+	Right
+
+	// Infix should mark the nodes which are parents of expression nodes using infix notation, e.g.: a+b.
+	// Nodes without Infix or Postfix mark are considered in prefix order by default.
+	Infix
+
+	// Postfix should mark the nodes which are parents of nodes using postfix notation, e.g.: ab+.
+	// Nodes without Infix or Postfix mark are considered in prefix order by default.
+	Postfix
+
+	// Bitwise is any form of bitwise operation.
+	Bitwise
+
+	// Boolean is any form of boolean operation.
+	Boolean
+
+	// Unsigned is a form of unsigned operation.
+	Unsigned
+
+	// LeftShift is a left shift operation (i.e. `<<`, `rol`, etc.)
+	LeftShift
+
+	// RightShift is a right shift operation (i.e. `>>`, `ror`, etc.)
+	RightShift
+
+	// Or is an OR operation (i.e. `||`, `or`, `|`, etc.)
+	Or
+
+	// Xor is an exclusive OR operation (i.e. `~`, `^`, etc.)
+	Xor
+
+	// And is an AND operation (i.e. `&&`, `&`, `and`, etc.)
+	And
+
+	// Expression is a construct computed to produce some value.
+	Expression
+
+	// Statement is some action to be carried out.
+	Statement
+
+	// Equal is an equality predicate (i.e. `=`, `==`, etc.)
+	Equal
+
+	// Not is a negation operation. It may be used to annotate a complement of an operator.
+	Not
+
+	// LessThan is a comparison predicate that checks if the lhs value is smaller than the rhs value (i.e. `<`.)
+	LessThan
+
+	// LessThanOrEqual is a comparison predicate that checks if the lhs value is smaller or equal to the rhs value (i.e. `<=`.)
+	LessThanOrEqual
+
+	// GreaterThan is a comparison predicate that checks if the lhs value is greater than the rhs value (i.e. `>`.)
+	GreaterThan
+
+	// GreaterThanOrEqual is a comparison predicate that checks if the lhs value is greater than or equal to the rhs value (i.e. `>=`.)
+	GreaterThanOrEqual
+
+	// Identical is an identity predicate (i.e. `===`, `is`, etc.)
+	Identical
+
+	// Contains is a membership predicate that checks if the lhs value is a member of the rhs container (i.e. `in` in Python.)
+	Contains
+
+	// Increment is an arithmetic operator that increments a value (i.e. `++i`.)
+	Increment
+
+	// Decrement is an arithmetic operator that decrements a value (i.e. `--i`.)
+	Decrement
+
+	// Negative is an arithmetic operator that negates a value (i.e. `-x`.)
+	Negative
+
+	// Positive is an arithmetic operator that makes a value positive. It's usually redundant (i.e. `+x`.)
+	Positive
+
+	// Dereference is an operation that gets the actual value of a pointer or reference (i.e. `*x`.)
+	Dereference
+
+	// TakeAddress is an operation that gets the memory address of a value (i.e. `&x`.)
+	TakeAddress
+
+	// File is the root node of a single file AST.
+	File
+
+	// Add is an arithmetic operator (i.e. `+`.)
+	Add
+
+	// Substract is an arithmetic operator (i.e. `-`.)
+	Substract
+
+	// Multiply is an arithmetic operator (i.e. `*`.)
+	Multiply
+
+	// Divide is an arithmetic operator (i.e. `/`.)
+	Divide
+
+	// Modulo is an arithmetic operator (i.e. `%`, `mod`, etc.)
+	Modulo
+
+	// Package indicates a package level property.
+	Package
+
+	// Declaration is a construct to specify properties of an identifier.
+	Declaration
+
+	// Import indicates an import level property.
+	Import
+
+	// Pathname is a qualified name of some construct.
+	Pathname
+
+	// Alias is an alternative name for some construct.
+	Alias
+
+	// Function is a sequence of instructions packaged as a unit.
+	Function
+
+	// Body is a sequence of instructions in a block.
+	Body
+
+	// Name is an identifier used to reference a value.
+	Name
+
+	// Receiver is the target of a construct (message, function, etc.)
+	Receiver
+
+	// Argument is a variable used as input/output in a function.
+	Argument
+
+	// Value is an expression that cannot be evaluated any further.
+	Value
+
+	// ArgsList is a variable number of arguments (i.e. `...`, `Object...`, `*args`, etc.)
+	ArgsList
+
+	// Base is the parent type from which another type inherits.
+	Base
+
+	// Implements is the type (usually an interface) that another type implements.
+	Implements
+
+	// Instance is a concrete occurrence of an object.
+	Instance
+
+	// Subtype is a type that can be used to substitute another type.
+	Subtype
+
+	// Subpackage is a package that is below another package in the hierarchy.
+	Subpackage
+
+	// Module is a grouped set of functionality.
+	Module
+
+	// Friend is an access granter for some private resources.
+	Friend
+
+	// World is a set of every component.
+	World
+
+	// If is used for if-then[-else] statements or expressions.
+	// An if-then tree will look like:
+	//
+	//	If, Statement {
+	//		**[non-If nodes] {
+	//			If, Condition {
+	//				[...]
+	//			}
+	//		}
+	//		**[non-If* nodes] {
+	//			If, Then {
+	//				[...]
+	//			}
+	//		}
+	//		**[non-If* nodes] {
+	//			If, Else {
+	//				[...]
+	//			}
+	//		}
+	//	}
+	//
+	// The Else node is optional. The order of Condition, Then and
+	// Else is not defined.
+	If
+
+	// Condition is a condition in an IfStatement or IfExpression.
+	Condition
+
+	// Then is the clause executed when the Condition is true.
+	Then
+
+	// Else is the clause executed when the Condition is false.
+	Else
+
+	// Switch is used to represent a broad range of switch flavors. An expression
+	// is evaluated and then compared to the values returned by different
+	// case expressions, executing a body associated to the first case that
+	// matches. Similar constructions that go beyond expression comparison
+	// (such as pattern matching in Scala's match) should not be annotated
+	// with Switch.
+	Switch
+
+	// Case is a clause whose expression is compared with the condition.
+	Case
+
+	// Default is a clause that is called when no other clause matches.
+	Default
+
+	// For is a loop with an initialization, a condition, an update and a body.
+	For
+
+	// Initialization is the assignment of an initial value to a variable
+	// (i.e. a for loop variable initialization.)
+	Initialization
+
+	// Update is the assignment of a new value to a variable
+	// (i.e. a for loop variable update.)
+	Update
+
+	// Iterator is the element that iterates over something.
+	Iterator
+
+	// While is a loop construct with a condition and a body.
+	While
+
+	// DoWhile is a loop construct with a body and a condition.
+	DoWhile
+
+	// Break is a construct for early exiting a block.
+	Break
+
+	// Continue is a construct for continuation with the next iteration of a loop.
+	Continue
+
+	// Goto is an unconditional transfer of control statement.
+	Goto
+
+	// Block is a group of statements. If the source language has block scope,
+	// it should be annotated both with Block and BlockScope.
+	Block
+
+	// Scope is a range in which a variable can be referred to.
+	Scope
+
+	// Return is a return statement. It might have a child expression or not
+	// as with naked returns in Go or return in void methods in Java.
+	Return
+
+	// Try is a statement for exception handling.
+	Try
+
+	// Catch is a clause to capture exceptions.
+	Catch
+
+	// Finally is a clause for a block executed after a block with exception handling.
+	Finally
+
+	// Throw is a statement that creates an exception.
+	Throw
+
+	// Assert checks if an expression is true and if it is not, it signals
+	// an error/exception, possibly stopping the execution.
+	Assert
+
+	// Call is any call, whether it is a function, procedure, method or macro.
+	// In its simplest form, a call will have a single child with a function
+	// name (callee). Arguments are marked with Argument and Positional or Name.
+	// In OO languages there is usually a Receiver too.
+	Call
+
+	// Callee is the callable being called. It might be the name of a
+	// function or procedure, it might be a method, it might be a simple name
+	// or qualified with a namespace.
+	Callee
+
+	// Positional is an element whose position has meaning (i.e. a positional argument in a call).
+	Positional
+
+	// Noop is a construct that does nothing.
+	Noop
+
+	// Literal is a literal value.
+	Literal
+
+	// Byte is a single-byte element.
+	Byte
+
+	// ByteString is a raw byte string.
+	ByteString
+
+	// Character is an encoded character.
+	Character
+
+	// List is a sequence.
+	List
+
+	// Map is a collection of key, value pairs.
+	Map
+
+	// Null is an empty value.
+	Null
+
+	// Number is a numeric value. This applies to any numeric value
+	// whether it is integer or float, any base, scientific notation or not,
+	// etc.
+	Number
+
+	// Regexp is a regular expression.
+	Regexp
+
+	// Set is a collection of values.
+	Set
+
+	// String is a sequence of characters.
+	String
+
+	// Tuple is a finite ordered sequence of elements.
+	Tuple
+
+	// Type is a classification of data.
+	Type
+
+	// Entry is a collection element.
+	Entry
+
+	// Key is the index value of a map.
+	Key
+
+	// Primitive is a language builtin.
+	Primitive
+
+	// Assignment is an assignment operator.
+	Assignment
+
+	// This represents the self-reference of an object instance in
+	// one of its methods. This corresponds to the `this` keyword
+	// (e.g. Java, C++, PHP), `self` (e.g. Smalltalk, Perl, Swift) and `Me`
+	// (e.g. Visual Basic).
+	This
+
+	// Comment is a code comment.
+	Comment
+
+	// Documentation is a node that represents documentation of another node,
+	// such as function or package. Documentation is usually in the form of
+	// a string in certain position (e.g. Python docstring) or comment
+	// (e.g. Javadoc, godoc).
+	Documentation
+
+	// Whitespace is a node containing whitespace(s).
+	Whitespace
+
+	// Incomplete expresses that the semantic meaning of the node roles doesn't express
+	// the full semantic information. Added in BIP-002.
+	Incomplete
+
+	// Unannotated will be automatically added by the SDK for nodes that did not receive
+	// any annotations with the current version of the driver's `annotations.go` file.
+	// Added in BIP-002.
+ Unannotated + + // Visibility is an access granter role, usually together with an specifier role + Visibility + + // Annotation is syntactic metadata + Annotation + + // Anonymous is an unbound construct + Anonymous + + // Enumeration is a distinct type that represents a set of named constants + Enumeration + + // Arithmetic is a type of operation + Arithmetic + + // Relational is a type of operation + Relational + + // Variable is a symbolic name associatend with a value + Variable +) diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/role_string.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/role_string.go new file mode 100644 index 0000000..ec22309 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/role/role_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Role"; DO NOT EDIT. + +package role + +import "strconv" + +const _Role_name = "InvalidIdentifierQualifiedOperatorBinaryUnaryLeftRightInfixPostfixBitwiseBooleanUnsignedLeftShiftRightShiftOrXorAndExpressionStatementEqualNotLessThanLessThanOrEqualGreaterThanGreaterThanOrEqualIdenticalContainsIncrementDecrementNegativePositiveDereferenceTakeAddressFileAddSubstractMultiplyDivideModuloPackageDeclarationImportPathnameAliasFunctionBodyNameReceiverArgumentValueArgsListBaseImplementsInstanceSubtypeSubpackageModuleFriendWorldIfConditionThenElseSwitchCaseDefaultForInitializationUpdateIteratorWhileDoWhileBreakContinueGotoBlockScopeReturnTryCatchFinallyThrowAssertCallCalleePositionalNoopLiteralByteByteStringCharacterListMapNullNumberRegexpSetStringTupleTypeEntryKeyPrimitiveAssignmentThisCommentDocumentationWhitespaceIncompleteUnannotatedVisibilityAnnotationAnonymousEnumerationArithmeticRelationalVariable" + +var _Role_index = [...]uint16{0, 7, 17, 26, 34, 40, 45, 49, 54, 59, 66, 73, 80, 88, 97, 107, 109, 112, 115, 125, 134, 139, 142, 150, 165, 176, 194, 203, 211, 220, 229, 237, 245, 256, 267, 271, 274, 283, 291, 297, 303, 310, 321, 327, 335, 340, 348, 352, 356, 364, 372, 377, 385, 389, 399, 407, 414, 424, 430, 436, 441, 443, 452, 456, 460, 466, 470, 477, 480, 494, 500, 508, 513, 520, 525, 533, 537, 542, 547, 553, 556, 561, 568, 573, 579, 583, 589, 599, 603, 610, 614, 624, 633, 637, 640, 644, 650, 656, 659, 665, 670, 674, 679, 682, 691, 701, 705, 712, 725, 735, 745, 756, 766, 776, 785, 796, 806, 816, 824} + +func (i Role) String() string { + if i < 0 || i >= Role(len(_Role_index)-1) { + return "Role(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Role_name[_Role_index[i]:_Role_index[i+1]] +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/arrays.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/arrays.go new file mode 100644 index 0000000..82d5f0d --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/arrays.go @@ -0,0 +1,411 @@ +package transformer + +import "gopkg.in/bblfsh/sdk.v2/uast/nodes" + +func MapEach(vr string, m Mapping) Mapping { + src, dst := m.Mapping() + return Map(Each(vr, src), Each(vr, dst)) +} + +// ArrayOp is a subset of operations that operates on an arrays with a pre-defined size. See Arr. +type ArrayOp interface { + Op + arr(st *State) (opArr, error) +} + +// Arr checks if the current object is a list with a number of elements +// matching a number of ops, and applies ops to corresponding elements. +// Reversal creates a list of the size that matches the number of ops +// and creates each element with the corresponding op. 
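// Illustrative sketch (editor's example, hypothetical usage in this package; the
// function name is invented): Arr pairs each element with one op on Check, and the
// same ops rebuild an equal array from the recorded variables on Construct.
func exampleArrRoundTrip() (nodes.Node, error) {
	op := Arr(Var("x"), Var("y")) // matches arrays of exactly two elements

	st := NewState()
	if ok, err := op.Check(st, nodes.Array{nodes.String("a"), nodes.String("b")}); err != nil || !ok {
		return nil, err
	}
	// "x" and "y" now hold the two elements; Construct re-creates the same array.
	return op.Construct(st, nil)
}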
+func Arr(ops ...Op) ArrayOp { + return opArr(ops) +} + +type opArr []Op + +func (opArr) Kinds() nodes.Kind { + return nodes.KindArray +} + +func (op opArr) arr(_ *State) (opArr, error) { + return op, nil +} +func (op opArr) Check(st *State, n nodes.Node) (bool, error) { + arr, ok := n.(nodes.Array) + if !ok { + return filtered("%+v is not a list, %+v", n, op) + } else if len(arr) != len(op) { + return filtered("%+v has wrong len for %+v", n, op) + } + for i, sub := range op { + if ok, err := sub.Check(st, arr[i]); err != nil { + return false, errElem.Wrap(err, i, sub) + } else if !ok { + return false, nil + } + } + return true, nil +} + +func (op opArr) Construct(st *State, n nodes.Node) (nodes.Node, error) { + if err := noNode(n); err != nil { + return nil, err + } + arr := make(nodes.Array, 0, len(op)) + for i, sub := range op { + nn, err := sub.Construct(st, n) + if err != nil { + return nil, errElem.Wrap(err, i, sub) + } + arr = append(arr, nn) + } + return arr, nil +} + +// One is a shorthand for a list with one element. +func One(op Op) ArrayOp { + return Arr(op) +} + +// LookupArrOpVar is like LookupOpVar but returns an array operation. +// Default value can be specified by setting the nil key. +func LookupArrOpVar(vr string, cases map[nodes.Value]ArrayOp) ArrayOp { + def := cases[nil] + delete(cases, nil) + return opLookupArrOp{vr: vr, cases: cases, def: def} +} + +type opLookupArrOp struct { + vr string + def ArrayOp + cases map[nodes.Value]ArrayOp +} + +func (opLookupArrOp) Kinds() nodes.Kind { + return nodes.KindArray +} + +func (op opLookupArrOp) arr(st *State) (opArr, error) { + vn, err := st.MustGetVar(op.vr) + if err != nil { + return nil, err + } + v, ok := vn.(nodes.Value) + if !ok { + return nil, ErrExpectedValue.New(vn) + } + sub, ok := op.cases[v] + if !ok { + if op.def == nil { + return nil, ErrUnhandledValueIn.New(v, op.cases) + } + sub = op.def + } + return sub.arr(st) +} + +func (op opLookupArrOp) Check(st *State, n nodes.Node) (bool, error) { + sub, err := op.arr(st) + if err != nil { + return false, err + } + return sub.Check(st, n) +} + +func (op opLookupArrOp) Construct(st *State, n nodes.Node) (nodes.Node, error) { + sub, err := op.arr(st) + if err != nil { + return nil, err + } + return sub.Construct(st, n) +} + +// PrependOne prepends a single element to an array. +func PrependOne(first Op, arr Op) Op { + return prependOne{first: first, tail: arr} +} + +type prependOne struct { + first, tail Op +} + +func (prependOne) Kinds() nodes.Kind { + return nodes.KindArray +} + +func (op prependOne) Check(st *State, n nodes.Node) (bool, error) { + arr, ok := n.(nodes.Array) + if !ok { + return false, nil + } else if len(arr) < 1 { + return false, nil + } + first, tail := arr[0], arr[1:] + if ok, err := op.first.Check(st, first); err != nil || !ok { + return false, err + } + if ok, err := op.tail.Check(st, tail); err != nil || !ok { + return false, err + } + return true, nil +} + +func (op prependOne) Construct(st *State, n nodes.Node) (nodes.Node, error) { + first, err := op.first.Construct(st, n) + if err != nil { + return nil, err + } + tail, err := op.tail.Construct(st, n) + if err != nil { + return nil, err + } + arr, ok := tail.(nodes.Array) + if !ok && tail != nil { + return nil, ErrExpectedList.New(tail) + } + out := make(nodes.Array, 0, len(arr)+1) + out = append(out, first) + out = append(out, arr...) + return out, nil +} + +// Append is like AppendArr but allows to set more complex first operation. +// Result of this operation should still be an array. 
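// Illustrative sketch (hypothetical, mirrors how RolesFieldOp in ast.go uses Append):
// bind an existing roles array of any length to the "roles" variable while requiring,
// and on reversal re-appending, the Statement role at its end (role package assumed imported).
var stmtRoles = Append(Var("roles"), Roles(role.Statement))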
+func Append(to Op, items ...ArrayOp) Op { + if len(items) == 0 { + return to + } + return opAppend{op: to, arrs: opAppendArr{arrs: items}} +} + +type opAppend struct { + op Op + arrs opAppendArr +} + +func (opAppend) Kinds() nodes.Kind { + return nodes.KindArray +} + +func (op opAppend) Check(st *State, n nodes.Node) (bool, error) { + arr, ok := n.(nodes.Array) + if !ok { + return filtered("%+v is not a list, %+v", n, op) + } + sarr, err := op.arrs.arr(st) + if err != nil { + return false, err + } + if len(sarr) > len(arr) { + return filtered("array %+v is too small for %+v", n, op) + } + // split into array part that will go to sub op, + // and the part we will use for sub-array checks + tail := len(arr) - len(sarr) + sub, arrs := arr[:tail], arr[tail:] + if len(sub) == 0 { + sub = nil + } + if ok, err := op.op.Check(st, sub); err != nil { + return false, errAppend.Wrap(err) + } else if !ok { + return false, nil + } + return sarr.Check(st, arrs) +} + +func (op opAppend) Construct(st *State, n nodes.Node) (nodes.Node, error) { + n, err := op.op.Construct(st, n) + if err != nil { + return nil, err + } + arr, ok := n.(nodes.Array) + if !ok { + return nil, ErrExpectedList.New(n) + } + sarr, err := op.arrs.arr(st) + if err != nil { + return nil, err + } + nn, err := sarr.Construct(st, nil) + if err != nil { + return nil, err + } + arr2, ok := nn.(nodes.Array) + if !ok { + return nil, ErrExpectedList.New(n) + } + arr = append(arr, arr2...) + return arr, nil +} + +// AppendArr asserts that a node is a Array and checks that it contains a defined set of nodes at the end. +// Reversal uses sub-operation to create a Array and appends provided element lists at the end of it. +func AppendArr(items ...ArrayOp) ArrayOp { + if len(items) == 1 { + return items[0] + } + return opAppendArr{arrs: items} +} + +type opAppendArr struct { + arrs []ArrayOp +} + +func (opAppendArr) Kinds() nodes.Kind { + return nodes.KindArray +} + +func (op opAppendArr) arr(st *State) (opArr, error) { + var arr opArr + for _, sub := range op.arrs { + a, err := sub.arr(st) + if err != nil { + return nil, err + } + arr = append(arr, a...) + } + return arr, nil +} + +func (op opAppendArr) Check(st *State, n nodes.Node) (bool, error) { + sarr, err := op.arr(st) + if err != nil { + return false, err + } + return sarr.Check(st, n) +} + +func (op opAppendArr) Construct(st *State, n nodes.Node) (nodes.Node, error) { + sarr, err := op.arr(st) + if err != nil { + return nil, err + } + return sarr.Construct(st, n) +} + +// Each checks that current node is an array and applies sub-operation to each element. +// It uses a variable to store state of each element. 
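// Illustrative sketch (hypothetical): apply a sub-operation to every element of an
// array, keeping one saved state per element under the "items" variable; Construct
// replays those states to rebuild the array. Each element is assumed to be an object
// whose only key is "name".
var eachName = Each("items", Obj{"name": Var("n")})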
+func Each(vr string, op Op) Op { + return opEach{vr: vr, op: op} +} + +type opEach struct { + vr string + op Op +} + +func (opEach) Kinds() nodes.Kind { + return nodes.KindNil | nodes.KindArray +} + +func (op opEach) Check(st *State, n nodes.Node) (bool, error) { + arr, ok := n.(nodes.Array) + if !ok && n != nil { + return filtered("%+v is not a list, %+v", n, op) + } + var subs []*State + if arr != nil { + subs = make([]*State, 0, len(arr)) + } + for i, sub := range arr { + sst := NewState() + ok, err := op.op.Check(sst, sub) + if err != nil { + return false, errElem.Wrap(err, i, sub) + } else if !ok { + return false, nil + } + subs = append(subs, sst) + } + if err := st.SetStateVar(op.vr, subs); err != nil { + return false, err + } + return true, nil +} + +func (op opEach) Construct(st *State, n nodes.Node) (nodes.Node, error) { + if err := noNode(n); err != nil { + return nil, err + } + subs, ok := st.GetStateVar(op.vr) + if !ok { + return nil, ErrVariableNotDefined.New(op.vr) + } + if subs == nil { + return nil, nil + } + arr := make(nodes.Array, 0, len(subs)) + for i, stt := range subs { + sub, err := op.op.Construct(stt, nil) + if err != nil { + return nil, errElem.Wrap(err, i, nil) + } + arr = append(arr, sub) + } + return arr, nil +} + +func ArrWith(arr Op, items ...Op) Op { + if len(items) == 0 { + return arr + } + return opArrWith{arr: arr, items: items} +} + +type opArrWith struct { + arr Op + items []Op +} + +func (op opArrWith) Kinds() nodes.Kind { + return (nodes.KindArray | nodes.KindNil) & op.arr.Kinds() +} + +func (op opArrWith) Check(st *State, n nodes.Node) (bool, error) { + arr, ok := n.(nodes.Array) + if !ok { + return false, nil + } + arr = arr.CloneList() + // find items in the array and remove them from it + for _, s := range op.items { + found := false + for i, v := range arr { + sst := st.Clone() + if ok, err := s.Check(sst, v); err != nil { + return false, err + } else if ok { + st.ApplyFrom(sst) + arr = append(arr[:i], arr[i+1:]...) + found = true + break + } + } + if !found { + if ok, err := s.Check(st, nil); err != nil || !ok { + return false, err + } + } + } + return op.arr.Check(st, arr) +} + +func (op opArrWith) Construct(st *State, n nodes.Node) (nodes.Node, error) { + n, err := op.arr.Construct(st, n) + if err != nil { + return nil, err + } + arr, ok := n.(nodes.Array) + if !ok { + return nil, ErrExpectedList.New(n) + } + for _, s := range op.items { + v, err := s.Construct(st, nil) + if err != nil { + return nil, err + } + arr = append(arr, v) + } + return arr, nil +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/ast.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/ast.go new file mode 100644 index 0000000..f492d94 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/ast.go @@ -0,0 +1,429 @@ +package transformer + +import ( + "fmt" + "strings" + + "gopkg.in/bblfsh/sdk.v2/uast" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/role" +) + +// SavePosOffset makes an operation that describes a uast.Position object with Offset field set to a named variable. +func SavePosOffset(vr string) Op { + return UASTType(uast.Position{}, Obj{ + uast.KeyPosOff: Var(vr), + }) +} + +// SavePosLineCol makes an operation that describes a uast.Position object with Line and Col field set to named variables. 
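// Illustrative sketch (hypothetical): on the normalized side of a mapping, variables
// previously bound to the native line/column values can be packed into a uast.Position
// stored under uast.KeyStart, which is what ObjectToNode.Mapping in legacy.go does.
var startPos = Field{Name: uast.KeyStart, Op: SavePosLineCol("line", "col")}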
+func SavePosLineCol(varLine, varCol string) Op { + return UASTType(uast.Position{}, Obj{ + uast.KeyPosLine: Var(varLine), + uast.KeyPosCol: Var(varCol), + }) +} + +// Roles makes an operation that will check/construct a list of roles. +func Roles(roles ...role.Role) ArrayOp { + arr := make([]Op, 0, len(roles)) + for _, r := range roles { + arr = append(arr, Is(nodes.String(r.String()))) + } + return Arr(arr...) +} + +// AppendRoles can be used to append more roles to an output of a specific operation. +func AppendRoles(old ArrayOp, roles ...role.Role) ArrayOp { + if len(roles) != 0 && old != nil { + return AppendArr(old, Roles(roles...)) + } else if old != nil { + return old + } + return Roles(roles...) +} + +// RolesField will create a roles field that appends provided roles to existing ones. +// In case no roles are provided, it will save existing roles, if any. +func RolesField(vr string, roles ...role.Role) Field { + return RolesFieldOp(vr, nil, roles...) +} + +// RolesFieldOp is like RolesField but allows to specify custom roles op to use. +func RolesFieldOp(vr string, op ArrayOp, roles ...role.Role) Field { + if len(roles) == 0 && op == nil { + return Field{ + Name: uast.KeyRoles, + Op: Var(vr), + Optional: vr + "_exists", + } + } + rop := AppendRoles(op, roles...) + return Field{ + Name: uast.KeyRoles, + Op: If(vr+"_exists", + Append(NotEmpty(Var(vr)), rop), + rop, + ), + } +} + +// ASTObjectLeft construct a native AST shape for a given type name. +func ASTObjectLeft(typ string, ast ObjectOp) ObjectOp { + if fields, ok := ast.Fields(); !ok { + panic("unexpected partial transform") + } else if _, ok := fields[uast.KeyRoles]; ok { + panic("unexpected roles filed") + } + var obj Fields + if typ != "" { + obj = append(obj, Field{Name: uast.KeyType, Op: String(typ)}) + } + obj = append(obj, RolesField(typ+"_roles")) + return Part("_", JoinObj(ast, obj)) +} + +// ASTObjectRight constructs an annotated native AST shape with specific roles. +func ASTObjectRight(typ string, norm ObjectOp, rop ArrayOp, roles ...role.Role) ObjectOp { + return ASTObjectRightCustom(typ, norm, nil, rop, roles...) +} + +// RolesByType is a function for getting roles for specific AST node type. +type RolesByType func(typ string) role.Roles + +// ASTObjectRightCustom is like ASTObjectRight but allow to specify additional roles for each type. +func ASTObjectRightCustom(typ string, norm ObjectOp, fnc RolesByType, rop ArrayOp, roles ...role.Role) ObjectOp { + if fields, ok := norm.Fields(); !ok { + panic("unexpected partial transform") + } else if _, ok := fields[uast.KeyRoles]; ok { + panic("unexpected roles field") + } + var obj Fields + if typ != "" { + obj = append(obj, Field{Name: uast.KeyType, Op: String(typ)}) // TODO: ":" namespace + } + // it merges 3 slices: + // 1) roles saved from left side (if any) + // 2) static roles from arguments + // 3) roles from conditional operation + if fnc != nil { + if static := fnc(typ); len(static) > 0 { + roles = append([]role.Role{}, roles...) + roles = append(roles, static...) + } + } + obj = append(obj, RolesFieldOp(typ+"_roles", rop, roles...)) + return Part("_", JoinObj(norm, obj)) +} + +// ObjectRoles creates a shape that adds additional roles to an object. +// Should only be used in other object fields, since it does not set any type constraints. +// Specified variable name (vr) will be used as a prefix for variables to store old roles and unprocessed object keys. +func ObjectRoles(vr string, roles ...role.Role) Op { + return ObjectRolesCustom(vr, nil, roles...) 
+}
+
+// ObjectRolesCustom is like ObjectRoles but allows to specify additional constraints for the object.
+func ObjectRolesCustom(vr string, obj ObjectOp, roles ...role.Role) Op {
+	return ObjectRolesCustomOp(vr, obj, nil, roles...)
+}
+
+// ObjectRolesCustomOp is like ObjectRolesCustom but allows to specify a custom roles lookup.
+func ObjectRolesCustomOp(vr string, obj ObjectOp, rop ArrayOp, roles ...role.Role) Op {
+	f := Fields{RolesFieldOp(vr+"_roles", rop, roles...)}
+	if obj == nil {
+		obj = f
+	} else {
+		obj = JoinObj(obj, f)
+	}
+	return Part(vr, obj)
+}
+
+// EachObjectRoles is a helper that constructs an Each(ObjectRoles(roles)) operation.
+// It will annotate all elements of the array with a specified list of roles.
+func EachObjectRoles(vr string, roles ...role.Role) Op {
+	return eachObjectRolesByType(vr, nil, roles...)
+}
+
+// EachObjectRolesByType is like EachObjectRoles but adds additional roles for each type specified in the map.
+// EachObjectRolesByType should always be paired on both sides of the transform because it uses variables to store type info.
+func EachObjectRolesByType(vr string, types map[string][]role.Role, roles ...role.Role) Op {
+	if types == nil {
+		types = make(map[string][]role.Role)
+	}
+	return eachObjectRolesByType(vr, types, roles...)
+}
+
+func eachObjectRolesByType(vr string, types map[string][]role.Role, roles ...role.Role) Op {
+	var (
+		obj ObjectOp
+		rop ArrayOp
+	)
+	if types != nil {
+		tvar := vr + "_type"
+		obj = Obj{
+			uast.KeyType: Var(tvar),
+		}
+		if len(types) != 0 {
+			cases := make(map[nodes.Value]ArrayOp, len(types))
+			for typ, arr := range types {
+				var key nodes.Value
+				if typ != "" {
+					key = nodes.String(typ)
+				}
+				cases[key] = Roles(arr...)
+			}
+			rop = LookupArrOpVar(tvar, cases)
+		}
+	}
+	return Each(vr+"_arr", ObjectRolesCustomOp(vr, obj, rop, roles...))
+}
+
+// OptObjectRoles is like ObjectRoles, but marks an object as optional.
+func OptObjectRoles(vr string, roles ...role.Role) Op {
+	return Opt(vr+"_set", ObjectRoles(vr, roles...))
+}
+
+// Operator is a helper to make an AST node describing an operator.
+func Operator(vr string, lookup map[nodes.Value]ArrayOp, roles ...role.Role) ObjectOp {
+	roles = append([]role.Role{
+		role.Expression, role.Operator,
+	}, roles...)
+	var opRoles Op = Roles(roles...)
+	if lookup != nil {
+		opRoles = AppendRoles(
+			LookupArrOpVar(vr, lookup),
+			roles...,
+		)
+	}
+	return Fields{
+		{Name: uast.KeyType, Op: String(uast.TypeOperator)},
+		{Name: uast.KeyToken, Op: Var(vr)},
+		{Name: uast.KeyRoles, Op: opRoles},
+	}
+}
+
+func uncomment(s string) (string, error) {
+	// Remove // and /*...*/ from comment nodes' tokens
+	if strings.HasPrefix(s, "//") {
+		s = s[2:]
+	} else if strings.HasPrefix(s, "/*") {
+		s = s[2 : len(s)-2]
+	}
+	return s, nil
+}
+
+func comment(s string) (string, error) {
+	if strings.Contains(s, "\n") {
+		return "/*" + s + "*/", nil
+	}
+	return "//" + s, nil
+}
+
+// UncommentCLike removes // and /* */ symbols from a given string variable.
+func UncommentCLike(vr string) Op {
+	return StringConv(Var(vr), uncomment, comment)
+}
+
+// Uncomment removes specified tokens from the beginning and from the end of a given string variable.
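// Illustrative sketch (hypothetical): comment markers are stripped from a token held
// in the "text" variable on Check and re-added on Construct.
var (
	cComment    = UncommentCLike("text")                // strips // or /* */
	hashComment = Uncomment("text", [2]string{"#", ""}) // strips a leading #
)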
+func Uncomment(vr string, tokens [2]string) Op { + return StringConv(Var(vr), func(s string) (string, error) { + s = strings.TrimPrefix(s, tokens[0]) + s = strings.TrimSuffix(s, tokens[1]) + return s, nil + }, func(s string) (string, error) { + return tokens[0] + s + tokens[1], nil + }) +} + +var _ ObjMapping = ObjRoles{} + +// ObjRoles is a helper type that stores a mapping from field names to their roles. +type ObjRoles map[string][]role.Role + +func (o ObjRoles) Mapping() (src, dst Op) { + return o.ObjMapping() +} +func (o ObjRoles) ObjMapping() (src, dst ObjectOp) { + m := make(FieldRoles, len(o)) + for name, roles := range o { + m[name] = FieldRole{Opt: true, Roles: roles} + } + return m.ObjMapping() +} + +// FieldRole is a list of operations that can be applied to an object field. +type FieldRole struct { + Rename string // rename the field to this name in resulting tree + + Skip bool // omit this field in the resulting tree + Add bool // create this field in the resulting tree + + Opt bool // field can be nil + Arr bool // field is an array; apply roles or custom operation to each element + Sub ObjMapping // a mapping that will be used for this field; overrides Op + Op Op // use this operation for the field on both sides of transformation + Roles []role.Role // list of roles to append to the field; has no effect if Op is set +} + +func (f FieldRole) validate() error { + if f.Arr && f.Opt { + return fmt.Errorf("field should either be a list or optional") + } + opSet := len(f.Roles) != 0 || f.Op != nil || f.Sub != nil + if !opSet && (f.Opt || f.Arr) { + return fmt.Errorf("either roles or operation should be set to use Opt or Arr") + } + if f.Skip && (f.Opt || f.Arr || opSet) { + return fmt.Errorf("skip cannot be used with other operations") + } + if f.Skip && (f.Rename != "" && !f.Add) { + return fmt.Errorf("rename can only be used with skip when Add is set") + } + return nil +} + +func (f FieldRole) build(name, pref string) (names [2]string, ops [2]Op, _ error) { + if err := f.validate(); err != nil { + return names, ops, err + } + rname := name + if f.Rename != "" { + rname = f.Rename + } + vr := pref + "var" + var l, r Op + if f.Sub != nil { + lo, ro := ObjScope(pref, f.Sub).ObjMapping() + if len(f.Roles) != 0 { + lo = JoinObj(lo, Fields{RolesField(vr)}) + ro = JoinObj(ro, Fields{RolesField(vr, f.Roles...)}) + } + pvr := vr + "m" + // this helper performs an additional checkto see if it already contains Part + l, r = MapPart(pvr, MapObj(lo, ro)).ObjMapping() + if f.Arr { + lvr := vr + "list" + l, r = Each(lvr, l), Each(lvr, r) + } else if f.Opt { + lvr := vr + "set" + l, r = Opt(lvr, l), Opt(lvr, r) + } + } else if f.Op != nil { + l, r = f.Op, f.Op + } else if len(f.Roles) == 0 { + l, r = Var(vr), Var(vr) + } else { + var fnc func(vr string, roles ...role.Role) Op + if f.Arr { + fnc = EachObjectRoles + } else if f.Opt { + fnc = OptObjectRoles + } else { + fnc = ObjectRoles + } + l, r = fnc(vr), fnc(vr, f.Roles...) + } + if f.Skip { + l = AnyVal(nil) + } + if f.Skip || !f.Add { + names[0] = name + ops[0] = l + } + if !f.Skip || f.Add { + names[1] = rname + ops[1] = r + } + return names, ops, nil +} + +var _ ObjMapping = FieldRoles{} + +// FieldRoles is a helper type that stores a mapping from field names to operations that needs to be applied to it. 
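// Illustrative sketch (hypothetical node and field names, not taken from any real
// driver): FieldRoles combined with AnnotateType (defined further down in ast.go)
// is the typical way a driver assigns roles to a native node type and its fields.
var annotateIf = AnnotateType("IfStatement", FieldRoles{
	"test":   {Roles: []role.Role{role.If, role.Condition}},
	"body":   {Arr: true, Roles: []role.Role{role.If, role.Then}},
	"orelse": {Arr: true, Roles: []role.Role{role.If, role.Else}},
}, role.If, role.Statement)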
+type FieldRoles map[string]FieldRole + +func (f FieldRoles) Mapping() (src, dst Op) { + return f.ObjMapping() +} +func (f FieldRoles) ObjMapping() (src, dst ObjectOp) { + l := make(Fields, 0, len(f)) + r := make(Fields, 0, len(f)) + for name, fld := range f { + names, ops, err := fld.build(name, name+"_") + if err != nil { + panic(fmt.Errorf("field %q: %v", name, err)) + } + opt := "" + if fld.Opt { + opt = name + "__exists" + } + if names[0] != "" { + l = append(l, Field{Name: names[0], Op: ops[0], Optional: opt}) + } + if names[1] != "" { + r = append(r, Field{Name: names[1], Op: ops[1], Optional: opt}) + } + } + return l, r +} + +// AnnotateTypeCustom is like AnnotateType but allows to specify custom roles operation as well as a mapper function. +func AnnotateTypeCustom(typ string, fields ObjMapping, rop ArrayOp, roles ...role.Role) ObjMapping { + if fields == nil { + fields = MapObj(Obj{}, Obj{}) + } + ast, norm := fields.ObjMapping() + return MapObj( + ASTObjectLeft(typ, ast), + ASTObjectRight(typ, norm, rop, roles...), + ) +} + +// AnnotateTypeCustomMap is like AnnotateTypeCustom, but allows to specify additional roles for each type. +func AnnotateTypeCustomMap(typ string, fields ObjMapping, fnc RolesByType, rop ArrayOp, roles ...role.Role) ObjMapping { + if fields == nil { + fields = MapObj(Obj{}, Obj{}) + } + ast, norm := fields.ObjMapping() + return MapObj( + ASTObjectLeft(typ, ast), + ASTObjectRightCustom(typ, norm, fnc, rop, roles...), + ) +} + +// AnnotateType is a helper to assign roles to specific fields. All fields are assumed to be optional and should be objects. +func AnnotateType(typ string, fields ObjMapping, roles ...role.Role) ObjMapping { + return AnnotateTypeCustom(typ, fields, nil, roles...) +} + +// StringToRolesMap is a helper to generate an array operation map that can be used for Lookup +// from a map from string values to roles. +func StringToRolesMap(m map[string][]role.Role) map[nodes.Value]ArrayOp { + out := make(map[nodes.Value]ArrayOp, len(m)) + for tok, roles := range m { + out[nodes.String(tok)] = Roles(roles...) + } + return out +} + +// AnnotateIfNoRoles adds roles to specific type if there were no roles set for it yet. +// +// Since rules are applied depth-first, this operation will work properly only in a separate mapping step. +// In other cases it will apply itself before parent node appends field roles. +func AnnotateIfNoRoles(typ string, roles ...role.Role) Mapping { + return Map( + Check( // TODO: CheckObj + Not(Has{ + uast.KeyRoles: AnyNode(nil), + }), + Part("_", Obj{ + uast.KeyType: String(typ), + }), + ), + Part("_", Obj{ + uast.KeyType: String(typ), + uast.KeyRoles: Roles(roles...), + }), + ) +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/conv.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/conv.go new file mode 100644 index 0000000..2978c0a --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/conv.go @@ -0,0 +1,19 @@ +package transformer + +import ( + "fmt" + "strconv" +) + +// Quote uses strconv.Quote/Unquote to wrap provided string value. 
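// Illustrative sketch (hypothetical keys): a quoted string literal read from the
// native AST is unquoted before being stored in the "val" variable, and re-quoted
// when the native tree is reconstructed.
var strLit = Obj{"value": Quote(Var("val"))}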
+func Quote(op Op) Op { + return StringConv(op, func(s string) (string, error) { + ns, err := strconv.Unquote(s) + if err != nil { + return "", fmt.Errorf("%v (%s)", err, s) + } + return ns, nil + }, func(s string) (string, error) { + return strconv.Quote(s), nil + }) +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/errors.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/errors.go new file mode 100644 index 0000000..89535dd --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/errors.go @@ -0,0 +1,85 @@ +package transformer + +import ( + "bytes" + "fmt" + + "gopkg.in/src-d/go-errors.v1" +) + +var ( + // ErrVariableRedeclared is returned when a transformation defines the same variable twice. + ErrVariableRedeclared = errors.NewKind("variable %q redeclared (%v vs %v)") + // ErrVariableNotDefined is returned when an operation excepts a variable to be defined, but it was not set + // in current transformation state. + // + // Receiving this error might mean that transformations have an incorrect order and tries to use a variable + // in Lookup or If, for example, before another transformation step that sets this variable is executed. + // If this is the case, see Fields vs Obj comparison in regards to execution order. + ErrVariableNotDefined = errors.NewKind("variable %q is not defined") + // ErrExpectedObject is returned when transformation expected an object in the tree or variable, but got other type. + ErrExpectedObject = errors.NewKind("expected object, got %T") + // ErrExpectedList is returned when transformation expected an array in the tree or variable, but got other type. + ErrExpectedList = errors.NewKind("expected list, got %T") + // ErrExpectedValue is returned when transformation expected a value in the tree or variable, but got other type. + ErrExpectedValue = errors.NewKind("expected value, got %T") + // ErrUnhandledValueIn is returned when Lookup fails to find a value in the map. It's recommended to define all + // values in the lookup map. If it's not possible, use nil value as a key to set default case for Lookup. + ErrUnhandledValueIn = errors.NewKind("unhandled value: %v in %v") + // ErrUnexpectedNode is an internal error returned by SDK when a transformation that creates a value receives another + // value as an argument. This should not happen. + ErrUnexpectedNode = errors.NewKind("expected node to be nil, got: %v") + // ErrUnexpectedValue is returned when the value from state does not match constraints enforced by Construct. + ErrUnexpectedValue = errors.NewKind("unexpected value: %v") + // ErrUnexpectedType is returned in both Check and Construct when unexpected type is received as an argument or variable. + ErrUnexpectedType = errors.NewKind("unexpected type: exp %T vs got %T") + // ErrAmbiguousValue is returned when Lookup map contains the same value mapped to different keys. + ErrAmbiguousValue = errors.NewKind("map has ambiguous value %v") + // ErrUnusedField is returned when a transformation is not defined as partial, but does not process a specific key + // found in object. This usually means that an AST has a field that is not covered by transformation code and it + // should be added to the mapping. + ErrUnusedField = errors.NewKind("field was not used: %v") + // ErrDuplicateField is returned when trying to create a Fields definition with two items with the same name. 
+ ErrDuplicateField = errors.NewKind("duplicate field: %v") + // ErrUndefinedField is returned when trying to create an object with a field that is not defined in the type spec. + ErrUndefinedField = errors.NewKind("undefined field: %v") + + errAnd = errors.NewKind("op %d (%T)") + errKey = errors.NewKind("key %q") + errElem = errors.NewKind("elem %d (%T)") + errAppend = errors.NewKind("append") + errMapping = errors.NewKind("mapping %q") + + errCheck = errors.NewKind("check") + errConstruct = errors.NewKind("construct") +) + +var _ error = (*MultiError)(nil) + +// NewMultiError creates an error that contains multiple other errors. +// If a slice is empty, it returns nil. If there is only one error, it is returned directly. +func NewMultiError(errs ...error) error { + if len(errs) == 0 { + return nil + } else if len(errs) == 1 { + return errs[0] + } + return &MultiError{Errs: errs} +} + +// MultiError is an error that groups multiple other errors. +type MultiError struct { + Errs []error +} + +func (e *MultiError) Error() string { + if len(e.Errs) == 1 { + return e.Errs[0].Error() + } + buf := bytes.NewBuffer(nil) + fmt.Fprintf(buf, "received %d errors:\n", len(e.Errs)) + for _, err := range e.Errs { + fmt.Fprintf(buf, "\t%v\n", err) + } + return buf.String() +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/legacy.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/legacy.go new file mode 100644 index 0000000..46531fd --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/legacy.go @@ -0,0 +1,148 @@ +package transformer + +import ( + "gopkg.in/bblfsh/sdk.v2/uast" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/role" +) + +const dedupCloneObj = false + +var _ Transformer = ResponseMetadata{} + +// ResponseMetadata is a transformation that is applied to the root of AST tree to trim any metadata that might be there. +type ResponseMetadata struct { + // TopLevelIsRootNode tells ToNode where to find the root node of + // the AST. If true, the root will be its input argument. If false, + // the root will be the value of the only key present in its input + // argument. + TopLevelIsRootNode bool +} + +// Do applies the transformation described by this object. +func (n ResponseMetadata) Do(root nodes.Node) (nodes.Node, error) { + if obj, ok := root.(nodes.Object); ok && !n.TopLevelIsRootNode && len(obj) == 1 { + for _, v := range obj { + root = v + break + } + } + return root, nil +} + +// ObjectToNode transform trees that are represented as nested JSON objects. +// That is, an interface{} containing maps, slices, strings and integers. It +// then converts from that structure to *Node. +type ObjectToNode struct { + // InternalTypeKey is the name of the key that the native AST uses + // to differentiate the type of the AST nodes. This internal key will then be + // checkable in the AnnotationRules with the `HasInternalType` predicate. This + // field is mandatory. + InternalTypeKey string + // OffsetKey is the key used in the native AST to indicate the absolute offset, + // from the file start position, where the code mapped to the AST node starts. + OffsetKey string + // EndOffsetKey is the key used in the native AST to indicate the absolute offset, + // from the file start position, where the code mapped to the AST node ends. + EndOffsetKey string + // LineKey is the key used in the native AST to indicate + // the line number where the code mapped to the AST node starts. 
+ LineKey string + // EndLineKey is the key used in the native AST to indicate + // the line number where the code mapped to the AST node ends. + EndLineKey string + // ColumnKey is a key that indicates the column inside the line + ColumnKey string + // EndColumnKey is a key that indicates the column inside the line where the node ends. + EndColumnKey string +} + +// Mapping construct a transformation from ObjectToNode definition. +func (n ObjectToNode) Mapping() Mapping { + var ( + ast Fields + // -> + norm = make(Obj) + normPos Fields + ) + + if n.InternalTypeKey != "" { + const vr = "itype" + ast = append(ast, Field{Name: n.InternalTypeKey, Op: Var(vr)}) + norm[uast.KeyType] = Var(vr) + } + + if n.OffsetKey != "" { + const vr = "pos_off_start" + ast = append(ast, Field{Name: n.OffsetKey, Op: Var(vr)}) + normPos = append(normPos, Field{Name: uast.KeyStart, Op: SavePosOffset(vr)}) + } + if n.EndOffsetKey != "" { + const vr = "pos_off_end" + ast = append(ast, Field{Name: n.EndOffsetKey, Op: Var(vr)}) + normPos = append(normPos, Field{Name: uast.KeyEnd, Op: SavePosOffset(vr)}) + } + if n.LineKey != "" && n.ColumnKey != "" { + const ( + vre = "pos_start_exists" + vrl = "pos_line_start" + vrc = "pos_col_start" + ) + ast = append(ast, + Field{Name: n.LineKey, Op: Var(vrl), Optional: vre}, + Field{Name: n.ColumnKey, Op: Var(vrc), Optional: vre}, + ) + normPos = append(normPos, Field{Name: uast.KeyStart, Op: SavePosLineCol(vrl, vrc), Optional: vre}) + } else if (n.LineKey != "" && n.ColumnKey == "") || (n.LineKey == "" && n.ColumnKey != "") { + panic("both LineKey and ColumnKey should either be set or not") + } + if n.EndLineKey != "" && n.EndColumnKey != "" { + const ( + vre = "pos_end_exists" + vrl = "pos_line_end" + vrc = "pos_col_end" + ) + ast = append(ast, + Field{Name: n.EndLineKey, Op: Var(vrl), Optional: vre}, + Field{Name: n.EndColumnKey, Op: Var(vrc), Optional: vre}, + ) + normPos = append(normPos, Field{Name: uast.KeyEnd, Op: SavePosLineCol(vrl, vrc), Optional: vre}) + } else if (n.EndLineKey != "" && n.EndColumnKey == "") || (n.EndLineKey == "" && n.EndColumnKey != "") { + panic("both EndLineKey and EndColumnKey should either be set or not") + } + if len(normPos) != 0 { + norm[uast.KeyPos] = UASTType(uast.Positions{}, normPos) + } + return MapPart("other", MapObj(ast, norm)) +} + +// RolesDedup is an irreversible transformation that removes duplicate roles from AST nodes. +func RolesDedup() TransformFunc { + return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) { + obj, ok := n.(nodes.Object) + if !ok { + return n, false, nil + } + roles := uast.RolesOf(obj) + if len(roles) == 0 { + return n, false, nil + } + m := make(map[role.Role]struct{}, len(roles)) + out := make([]role.Role, 0, len(roles)) + for _, r := range roles { + if _, ok := m[r]; ok { + continue + } + m[r] = struct{}{} + out = append(out, r) + } + if len(out) == len(roles) { + return n, false, nil + } + if dedupCloneObj { + obj = obj.CloneObject() + } + obj[uast.KeyRoles] = uast.RoleList(out...) 
+ return obj, dedupCloneObj, nil + }) +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/ops.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/ops.go new file mode 100644 index 0000000..08e739a --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/ops.go @@ -0,0 +1,1546 @@ +package transformer + +import ( + "fmt" + "sort" + + "gopkg.in/bblfsh/sdk.v2/uast" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" +) + +const ( + allowUnusedFields = false + errorOnFilterCheck = false +) + +func noNode(n nodes.Node) error { + if n == nil { + return nil + } + return ErrUnexpectedNode.New(n) +} + +func filtered(format string, args ...interface{}) (bool, error) { + if !errorOnFilterCheck { + return false, nil + } + return false, fmt.Errorf(format, args...) +} + +// Is checks if the current node to a given node. It can be a value, array or an object. +// Reversal clones the provided value into the tree. +func Is(o interface{}) MappingOp { + if n, ok := o.(nodes.Node); ok || o == nil { + return opIs{n: n} + } + n, err := uast.ToNode(o) + if err != nil { + panic(err) + } + return opIs{n: n} +} + +func OfKind(k nodes.Kind) Sel { + return opKind{k: k} +} + +type opKind struct { + k nodes.Kind +} + +func (op opKind) Kinds() nodes.Kind { + return op.k +} + +func (op opKind) Check(st *State, n nodes.Node) (bool, error) { + return nodes.KindOf(n).In(op.k), nil +} + +type opIs struct { + n nodes.Node +} + +func (op opIs) Mapping() (src, dst Op) { + return op, op +} + +func (op opIs) Kinds() nodes.Kind { + return nodes.KindOf(op.n) +} + +func (op opIs) Check(st *State, n nodes.Node) (bool, error) { + return nodes.Equal(op.n, n), nil +} + +func (op opIs) Construct(st *State, n nodes.Node) (nodes.Node, error) { + if op.n == nil { + return nil, nil + } + return op.n.Clone(), nil +} + +// Var stores current node as a value to a named variable in the shared state. +// Reversal replaces current node with the one from named variable. Variables can store subtrees. +func Var(name string) MappingOp { + return opVar{name: name, kinds: nodes.KindsAny} +} + +func VarKind(name string, k nodes.Kind) Op { + return Check(OfKind(k), Var(name)) +} + +type opVar struct { + name string + kinds nodes.Kind +} + +func (op opVar) Mapping() (src, dst Op) { + return op, op +} + +func (op opVar) Kinds() nodes.Kind { + return op.kinds +} + +func (op opVar) Check(st *State, n nodes.Node) (bool, error) { + if err := st.SetVar(op.name, n); err != nil { + return false, err + } + return true, nil +} + +func (op opVar) Construct(st *State, n nodes.Node) (nodes.Node, error) { + if err := noNode(n); err != nil { + return nil, err + } + val, err := st.MustGetVar(op.name) + if err != nil { + return nil, err + } + // TODO: should we clone it? + return val, nil +} + +// AnyNode matches any node and throws it away. Reversal will create a node with create op. +func AnyNode(create Mod) Op { + if create == nil { + create = Is(nil) + } + return opAnyNode{create: create} +} + +type opAnyNode struct { + create Mod +} + +func (opAnyNode) Kinds() nodes.Kind { + return nodes.KindsAny +} + +func (op opAnyNode) Check(st *State, n nodes.Node) (bool, error) { + return true, nil // always succeeds +} + +func (op opAnyNode) Construct(st *State, n nodes.Node) (nodes.Node, error) { + return op.create.Construct(st, n) +} + +// AnyVal accept any value and aways creates a node with a provided one. +func AnyVal(val nodes.Value) Op { + return AnyNode(Is(val)) +} + +// Seq checks current node with all ops in a sequence and fails if any of them fails. 
+// Reversal applies all modifications from ops to the current node. +// Typed ops should be at the beginning of the list to make sure that `Construct` +// creates a correct node type before applying specific changes to it. +func Seq(ops ...Op) Op { + if len(ops) == 1 { + return ops[0] + } + return opSeq(ops) +} + +type opSeq []Op + +func (op opSeq) Kinds() nodes.Kind { + var k nodes.Kind + for _, s := range op { + k |= s.Kinds() + } + return k +} + +func (op opSeq) Check(st *State, n nodes.Node) (bool, error) { + for i, sub := range op { + if ok, err := sub.Check(st, n); err != nil { + return false, errAnd.Wrap(err, i, sub) + } else if !ok { + return false, nil + } + } + return true, nil +} + +func (op opSeq) Construct(st *State, n nodes.Node) (nodes.Node, error) { + for i, sub := range op { + var err error + n, err = sub.Construct(st, n) + if err != nil { + return nil, errAnd.Wrap(err, i, sub) + } + } + return n, nil +} + +var _ ObjectOp = Obj{} + +var _ ObjectOps = Objs{} + +type Objs []Obj + +func (o Objs) ObjectOps() []ObjectOp { + l := make([]ObjectOp, 0, len(o)) + for _, s := range o { + l = append(l, s) + } + return l +} + +func EmptyObj() Op { + return Is(nodes.Object{}) +} + +// Obj is a helper for defining a transformation on an object fields. See Object. +// Operations will be sorted by the field name before execution. +type Obj map[string]Op + +func (Obj) Kinds() nodes.Kind { + return nodes.KindObject +} + +func (o Obj) Fields() (FieldDescs, bool) { + fields := make(FieldDescs, len(o)) + for k, v := range o { + f := FieldDesc{Optional: false} + if is, ok := v.(opIs); ok { + n := is.n + f.Fixed = &n + } + fields[k] = f + } + return fields, true +} + +// Object converts this helper to a full Object description. +func (o Obj) fields() Fields { + fields := make(Fields, 0, len(o)) + for k, op := range o { + fields = append(fields, Field{Name: k, Op: op}) + } + sort.Sort(ByFieldName(fields)) + return fields +} + +// Check will make an Object operation from this helper and call Check on it. +func (o Obj) Check(st *State, n nodes.Node) (bool, error) { + return o.fields().Check(st, n) +} + +// Construct will make an Object operation from this helper and call Construct on it. +func (o Obj) Construct(st *State, n nodes.Node) (nodes.Node, error) { + return o.fields().Construct(st, n) +} + +// CheckObj will make an Object operation from this helper and call Check on it. +func (o Obj) CheckObj(st *State, n nodes.Object) (bool, error) { + return o.fields().CheckObj(st, n) +} + +// ConstructObj will make an Object operation from this helper and call Construct on it. +func (o Obj) ConstructObj(st *State, n nodes.Object) (nodes.Object, error) { + return o.fields().ConstructObj(st, n) +} + +type FieldDesc struct { + Optional bool // field might not exists in the object + Fixed *nodes.Node // field is required to have a fixed value; the value may be nil +} + +// FieldDescs is a descriptions of static fields of an object. +// Transformation operation may return this struct to indicate what fields they will require. +type FieldDescs map[string]FieldDesc + +// Clone makes a copy of field description, without cloning each field values. +func (f FieldDescs) Clone() FieldDescs { + if f == nil { + return nil + } + fields := make(FieldDescs, len(f)) + for k, v := range f { + fields[k] = v + } + return fields +} + +// CheckObj verifies that an object matches field descriptions. +// It ignores all fields in the object that are not described. 
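// Illustrative sketch (hypothetical keys): Obj consumes the whole object, so every key
// of the matched node must be listed; Map pairs a source shape with a target shape.
var renameKey = Map(
	Obj{"path": Var("p")},
	Obj{"ImportPath": Var("p")},
)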
+func (f FieldDescs) CheckObj(n nodes.Object) bool { + for k, d := range f { + if d.Optional { + continue + } + v, ok := n[k] + if !ok { + return false + } + if d.Fixed != nil && !nodes.Equal(*d.Fixed, v) { + return false + } + } + return true +} + +// ObjectOp is an operation that is executed on an object. See Object. +type ObjectOp interface { + Op + // Fields returns a map of field names that will be processed by this operation. + // The flag if the map indicates if the field is required. + // False bool value returned as a second argument indicates that implementation will process all fields. + Fields() (FieldDescs, bool) + + CheckObj(st *State, n nodes.Object) (bool, error) + ConstructObj(st *State, n nodes.Object) (nodes.Object, error) +} + +type ObjectOps interface { + ObjectOps() []ObjectOp +} + +var _ ObjectOps = Objects{} + +type Objects []ObjectOp + +func (o Objects) ObjectOps() []ObjectOp { + return o +} + +func checkObj(op ObjectOp, st *State, n nodes.Node) (bool, error) { + cur, ok := n.(nodes.Object) + if !ok { + if errorOnFilterCheck { + return filtered("%+v is not an object\n%+v", n, op) + } + return false, nil + } + return op.CheckObj(st, cur) +} + +func constructObj(op ObjectOp, st *State, n nodes.Node) (nodes.Node, error) { + obj, ok := n.(nodes.Object) + if !ok { + if n != nil { + return nil, ErrExpectedObject.New(n) + } + obj = make(nodes.Object) + } + obj, err := op.ConstructObj(st, obj) + if err != nil { + return nil, err + } + return obj, nil +} + +// Part defines a partial transformation of an object. +// All unused fields will be stored into variable with a specified name. +func Part(vr string, o ObjectOp) ObjectOp { + used, ok := o.Fields() + if !ok { + panic("partial transform on an object with unknown fields") + } + return &opPartialObj{vr: vr, used: used, op: o} +} + +type opPartialObj struct { + vr string + used FieldDescs // fields that will be used by child operation + op ObjectOp +} + +func (op *opPartialObj) Kinds() nodes.Kind { + return nodes.KindObject +} + +func (op *opPartialObj) Fields() (FieldDescs, bool) { + return op.used, false +} + +func (op *opPartialObj) Check(st *State, n nodes.Node) (bool, error) { + return checkObj(op, st, n) +} + +// CheckObj will save all unknown fields and restore them to a new object on ConstructObj. +func (op *opPartialObj) CheckObj(st *State, n nodes.Object) (bool, error) { + if !op.used.CheckObj(n) { + return false, nil + } + // TODO: consider throwing an error if a transform is defined as partial, but in fact it's not + other := n.CloneObject() + n = make(nodes.Object) + for k := range op.used { + if _, ok := other[k]; ok { + n[k] = other[k] + delete(other, k) + } + } + if err := st.SetVar(op.vr, other); err != nil { + return false, err + } + return op.op.CheckObj(st, n) +} + +func (op *opPartialObj) Construct(st *State, n nodes.Node) (nodes.Node, error) { + return constructObj(op, st, n) +} + +// ConstructObj it will run a child operation and will also restore all unhandled fields. 
+func (op *opPartialObj) ConstructObj(st *State, obj nodes.Object) (nodes.Object, error) { + if obj == nil { + obj = make(nodes.Object) + } + obj, err := op.op.ConstructObj(st, obj) + if err != nil { + return nil, err + } + v, err := st.MustGetVar(op.vr) + if err != nil { + return nil, err + } + other, ok := v.(nodes.Object) + if !ok { + return nil, ErrExpectedObject.New(v) + } + for k, v := range other { + if v2, ok := obj[k]; ok { + return nil, fmt.Errorf("trying to overwrite already set field with partial object data: %q: %v = %v", + k, v2, v) + } + obj[k] = v + } + return obj, nil +} + +// JoinObj will execute all object operations on a specific object in a sequence. +func JoinObj(ops ...ObjectOp) ObjectOp { + if len(ops) == 0 { + return Obj{} + } else if len(ops) == 1 { + return ops[0] + } + // make sure that there is no field collision and allow only one partial + var ( + partial ObjectOp + out []processedOp + ) + required := make(FieldDescs) + for _, s := range ops { + if j, ok := s.(*opObjJoin); ok { + if j.partial != nil { + if partial != nil { + panic("only one partial transform is allowed") + } + partial = j.partial + } + for k, req := range j.allFields { + if req2, ok := required[k]; ok { + // only allow this if values are fixed and equal + if req.Fixed == nil || req2.Fixed == nil || !nodes.Equal(*req.Fixed, *req2.Fixed) { + panic(ErrDuplicateField.New(k)) + } + } + required[k] = req + } + out = append(out, j.ops...) + continue + } + fields, ok := s.Fields() + if !ok { + if partial != nil { + panic("only one partial transform is allowed") + } + partial = s + continue + } + for k, req := range fields { + if _, ok := required[k]; ok { + panic(ErrDuplicateField.New(k)) + } + required[k] = req + } + out = append(out, processedOp{op: s, fields: fields}) + } + if partial != nil { + required = nil + } + for i := 0; i < len(out); i++ { + op := out[i] + if len(op.fields) != 0 { + continue + } + if o, ok := op.op.(Obj); ok && len(o) == 0 && len(out) > 1 { + out = append(out[:i], out[i+1:]...) 
+ i-- + } + } + return &opObjJoin{ops: out, partial: partial, allFields: required} +} + +type processedOp struct { + op ObjectOp + fields FieldDescs +} + +type opObjJoin struct { + ops []processedOp + partial ObjectOp + allFields FieldDescs +} + +func (op *opObjJoin) Kinds() nodes.Kind { + return nodes.KindObject +} + +func (op *opObjJoin) Fields() (FieldDescs, bool) { + return op.allFields.Clone(), op.partial == nil +} + +func (op *opObjJoin) Check(st *State, n nodes.Node) (bool, error) { + return checkObj(op, st, n) +} + +func (op *opObjJoin) Construct(st *State, n nodes.Node) (nodes.Node, error) { + return constructObj(op, st, n) +} + +func (op *opObjJoin) CheckObj(st *State, n nodes.Object) (bool, error) { + if !op.allFields.CheckObj(n) { + return false, nil + } + src := n + n = n.CloneObject() + for _, s := range op.ops { + sub := make(nodes.Object, len(s.fields)) + for k := range s.fields { + if v, ok := src[k]; ok { + sub[k] = v + delete(n, k) + } + } + if ok, err := s.op.CheckObj(st, sub); err != nil || !ok { + return false, err + } + } + if op.partial != nil { + if ok, err := op.partial.CheckObj(st, n); err != nil || !ok { + return false, err + } + } + return true, nil +} + +func (op *opObjJoin) ConstructObj(st *State, n nodes.Object) (nodes.Object, error) { + if n == nil { + n = make(nodes.Object) + } + // make sure that ops won't overwrite fields + if op.partial != nil { + np, err := op.partial.ConstructObj(st, make(nodes.Object)) + if err != nil { + return nil, err + } + for k, v := range np { + if v2, ok := n[k]; ok && !nodes.Equal(v, v2) { + return nil, ErrDuplicateField.New(k) + } + n[k] = v + } + } + for _, s := range op.ops { + n2, err := s.op.ConstructObj(st, make(nodes.Object)) + if err != nil { + return nil, err + } + for k, v := range n2 { + if v2, ok := n[k]; ok && !nodes.Equal(v, v2) { + return nil, ErrDuplicateField.New(k) + } else if _, ok = s.fields[k]; !ok { + return nil, fmt.Errorf("undeclared field was set: %v", k) + } + n[k] = v + } + } + return n, nil +} + +// ByFieldName will sort fields descriptions by their names. 
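As a sketch of how JoinObj might combine independent object operations (transformer package scope assumed; type and field names are hypothetical):

func exampleJoinObj() {
	// One op pins the @type field, another handles the remaining fields.
	typeOp := Obj{uast.KeyType: String("example:Ident")}
	nameOp := Obj{"Name": Var("name")}
	// The joined op checks and constructs both field sets; overlapping
	// field names would panic with ErrDuplicateField.
	joined := JoinObj(typeOp, nameOp)
	_ = joined
}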
+type ByFieldName []Field + +func (arr ByFieldName) Len() int { + return len(arr) +} + +func (arr ByFieldName) Less(i, j int) bool { + return arr[i].Name < arr[j].Name +} + +func (arr ByFieldName) Swap(i, j int) { + arr[i], arr[j] = arr[j], arr[i] +} + +func Scope(name string, m Mapping) Mapping { + src, dst := m.Mapping() + return Map(OpScope(name, src), OpScope(name, dst)) +} + +func ObjScope(name string, m ObjMapping) ObjMapping { + src, dst := m.ObjMapping() + return MapObj(ObjOpScope(name, src), ObjOpScope(name, dst)) +} + +func OpScope(name string, op Op) Op { + return opScope{name: name, op: op} +} + +func ObjOpScope(name string, op ObjectOp) ObjectOp { + return opObjScope{name: name, op: op} +} + +type opObjScope struct { + name string + op ObjectOp +} + +func (op opObjScope) Kinds() nodes.Kind { + return op.op.Kinds() +} + +func (op opObjScope) Fields() (FieldDescs, bool) { + return op.op.Fields() +} + +func (op opObjScope) Check(st *State, n nodes.Node) (bool, error) { + sub := NewState() + if ok, err := op.op.Check(sub, n); err != nil || !ok { + return false, err + } + if err := st.SetStateVar(op.name, []*State{sub}); err != nil { + return false, err + } + return true, nil +} + +func (op opObjScope) CheckObj(st *State, n nodes.Object) (bool, error) { + sub := NewState() + if ok, err := op.op.CheckObj(sub, n); err != nil || !ok { + return false, err + } + if err := st.SetStateVar(op.name, []*State{sub}); err != nil { + return false, err + } + return true, nil +} + +func (op opObjScope) Construct(st *State, n nodes.Node) (nodes.Node, error) { + sts, ok := st.GetStateVar(op.name) + if !ok { + return nil, ErrVariableNotDefined.New(op.name) + } else if len(sts) != 1 { + return nil, fmt.Errorf("expected one state var, got %d", len(sts)) + } + sub := sts[0] + return op.op.Construct(sub, n) +} + +func (op opObjScope) ConstructObj(st *State, n nodes.Object) (nodes.Object, error) { + sts, ok := st.GetStateVar(op.name) + if !ok { + return nil, ErrVariableNotDefined.New(op.name) + } else if len(sts) != 1 { + return nil, fmt.Errorf("expected one state var, got %d", len(sts)) + } + sub := sts[0] + return op.op.ConstructObj(sub, n) +} + +type opScope struct { + name string + op Op +} + +func (op opScope) Kinds() nodes.Kind { + return op.op.Kinds() +} + +func (op opScope) Check(st *State, n nodes.Node) (bool, error) { + sub := NewState() + if ok, err := op.op.Check(sub, n); err != nil || !ok { + return false, err + } + if err := st.SetStateVar(op.name, []*State{sub}); err != nil { + return false, err + } + return true, nil +} + +func (op opScope) Construct(st *State, n nodes.Node) (nodes.Node, error) { + sts, ok := st.GetStateVar(op.name) + if !ok { + return nil, ErrVariableNotDefined.New(op.name) + } else if len(sts) != 1 { + return nil, fmt.Errorf("expected one state var, got %d", len(sts)) + } + sub := sts[0] + return op.op.Construct(sub, n) +} + +// Field is an operation on a specific field of an object. +type Field struct { + Name string // name of the field + // Optional can be set to make a field optional. Provided string is used as a variable name to the state of the field. + // Note that "optional" means that the field may not exists in the object, and it does not mean that the field can be nil. + // To handle nil fields, see Opt operation. + Optional string + Op Op // operation used to check/construct the field value +} + +var _ ObjectOp = Fields{} + +// Fields verifies that current node is an object and checks its fields with a +// defined operations. 
If field does not exist, object will be skipped. +// Reversal changes node type to object and creates all fields with a specified +// operations. +// Implementation will track a list of unprocessed object keys and will return an +// error in case the field was not used. To preserve all unprocessed keys use Part. +type Fields []Field + +func (Fields) Kinds() nodes.Kind { + return nodes.KindObject +} + +func (o Fields) Fields() (FieldDescs, bool) { + fields := make(FieldDescs, len(o)) + for _, f := range o { + fld := FieldDesc{Optional: f.Optional != ""} + if is, ok := f.Op.(opIs); ok { + n := is.n + fld.Fixed = &n + } + fields[f.Name] = fld + } + return fields, true +} + +// Check will verify that a node is an object and that fields matches a defined set of rules. +// +// If Part transform was not used, this operation will also ensure that all fields in the object are covered by field +// descriptions. If Pre was used, all unknown fields will be saved and restored to a new object on Construct. +// +// For information on optional fields see Field documentation. +func (o Fields) Check(st *State, n nodes.Node) (_ bool, gerr error) { + return checkObj(o, st, n) +} + +// Check will verify that a node is an object and that fields matches a defined set of rules. +// +// If Part transform was not used, this operation will also ensure that all fields in the object are covered by field +// descriptions. +// +// For information on optional fields see Field documentation. +func (o Fields) CheckObj(st *State, n nodes.Object) (bool, error) { + for _, f := range o { + n, ok := n[f.Name] + if f.Optional != "" { + if err := st.SetVar(f.Optional, nodes.Bool(ok)); err != nil { + return false, errKey.Wrap(err, f.Name) + } + if !ok { + continue + } + } + if !ok { + if errorOnFilterCheck { + return filtered("field %+v is missing in %+v\n%+v", f, n, o) + } + return false, nil + } + ok, err := f.Op.Check(st, n) + if err != nil { + return false, errKey.Wrap(err, f.Name) + } else if !ok { + return false, nil + } + } + if !allowUnusedFields { + set, _ := o.Fields() // TODO: optimize + for k := range n { + if _, ok := set[k]; !ok { + return false, ErrUnusedField.New(k) + } + } + } + return true, nil +} + +// Construct will create a new object and will populate it's fields according to field descriptions. +// If Part was used, it will also restore all unhandled fields. +func (o Fields) Construct(st *State, n nodes.Node) (nodes.Node, error) { + return constructObj(o, st, n) +} + +// ConstructObj will create a new object and will populate it's fields according to field descriptions. +func (o Fields) ConstructObj(st *State, obj nodes.Object) (nodes.Object, error) { + if obj == nil { + obj = make(nodes.Object, len(o)) + } + for _, f := range o { + if f.Optional != "" { + on, err := st.MustGetVar(f.Optional) + if err != nil { + return obj, errKey.Wrap(err, f.Name) + } + exists, ok := on.(nodes.Bool) + if !ok { + return obj, errKey.Wrap(ErrUnexpectedType.New(nodes.Bool(false), on), f.Name) + } + if !exists { + continue + } + } + v, err := f.Op.Construct(st, nil) + if err != nil { + return obj, errKey.Wrap(err, f.Name) + } + obj[f.Name] = v + } + return obj, nil +} + +// String asserts that value equals a specific string value. +func String(val string) MappingOp { + return Is(nodes.String(val)) +} + +// Int asserts that value equals a specific integer value. +func Int(val int) MappingOp { + return Is(nodes.Int(val)) +} + +// Uint asserts that value equals a specific unsigned integer value. 
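A short sketch of Fields with one required and one optional field (transformer package scope assumed; names are hypothetical):

func exampleFields() {
	op := Fields{
		{Name: "Name", Op: Var("name")},
		// Optional stores the presence flag in the variable "init_exists",
		// so Construct knows whether to emit the field again.
		{Name: "Init", Op: Var("init"), Optional: "init_exists"},
	}
	_ = op
}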
+func Uint(val uint) MappingOp { + return Is(nodes.Uint(val)) +} + +// Bool asserts that value equals a specific boolean value. +func Bool(val bool) MappingOp { + return Is(nodes.Bool(val)) +} + +var _ ObjMapping = ObjMap{} + +type ObjMap map[string]Mapping + +func (m ObjMap) Mapping() (src, dst Op) { + return m.ObjMapping() +} + +func (m ObjMap) ObjMapping() (src, dst ObjectOp) { + so, do := make(Obj, len(m)), make(Obj, len(m)) + for k, f := range m { + so[k], do[k] = f.Mapping() + } + return so, do +} + +// TypedObj is a shorthand for an object with a specific type +// and multiples operations on it. +func TypedObj(typ string, ops map[string]Op) Op { + obj := Obj(ops) + obj[uast.KeyType] = String(typ) + return obj +} + +// Lookup uses a value of current node to find a replacement for it +// in the map and checks result with op. +// The reverse step will use a reverse map to lookup value created by +// op and will assign it to the current node. +// Since reversal transformation needs to build a reverse map, +// the mapping should not be ambiguous in reverse direction (no duplicate values). +func Lookup(op Op, m map[nodes.Value]nodes.Value) Op { + rev := make(map[nodes.Value]nodes.Value, len(m)) + for k, v := range m { + if _, ok := rev[v]; ok { + panic(ErrAmbiguousValue.New("map has ambigous value %v", v)) + } + rev[v] = k + } + return &opLookup{op: op, fwd: m, rev: rev} +} + +type opLookup struct { + op Op + fwd, rev map[nodes.Value]nodes.Value +} + +func (*opLookup) Kinds() nodes.Kind { + return nodes.KindsValues +} + +func (op *opLookup) Check(st *State, n nodes.Node) (bool, error) { + v, ok := n.(nodes.Value) + if !ok { + return false, nil + } + vn, ok := op.fwd[v] + if !ok { + return false, ErrUnhandledValueIn.New(v, op.fwd) + } + return op.op.Check(st, vn) +} + +func (op *opLookup) Construct(st *State, n nodes.Node) (nodes.Node, error) { + if err := noNode(n); err != nil { + return nil, err + } + nn, err := op.op.Construct(st, nil) + if err != nil { + return nil, err + } + v, ok := nn.(nodes.Value) + if !ok { + return nil, ErrExpectedValue.New(n) + } + vn, ok := op.rev[v] + if !ok { + return nil, ErrUnhandledValueIn.New(v, op.rev) + } + return vn, nil +} + +// LookupVar is a shorthand to lookup value stored in variable. +func LookupVar(vr string, m map[nodes.Value]nodes.Value) Op { + return Lookup(Var(vr), m) +} + +// LookupOpVar is a conditional branch that takes a value of a variable and +// checks the map to find an appropriate operation to apply to current node. +// Note that the variable must be defined prior to this transformation, thus +// You might need to use Pre to define a variable used in this condition. 
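For example, Lookup might translate operator tokens in both directions (a sketch; transformer package scope and the nodes import are assumed):

func exampleLookup() {
	// Forward: "+" is replaced by "Add" before Var binds it; the reverse
	// direction uses the inverted map, so the values must be unique.
	op := Lookup(Var("op"), map[nodes.Value]nodes.Value{
		nodes.String("+"): nodes.String("Add"),
		nodes.String("-"): nodes.String("Sub"),
	})
	_ = op
}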
+func LookupOpVar(vr string, cases map[nodes.Value]Op) Op { + def := cases[nil] + delete(cases, nil) + return &opLookupOp{vr: vr, cases: cases, def: def} +} + +type opLookupOp struct { + vr string + def Op + cases map[nodes.Value]Op +} + +func (*opLookupOp) Kinds() nodes.Kind { + return nodes.KindsAny +} + +func (op *opLookupOp) eval(st *State) (Op, error) { + vn, err := st.MustGetVar(op.vr) + if err != nil { + return nil, err + } + v, ok := vn.(nodes.Value) + if !ok { + return nil, ErrExpectedValue.New(vn) + } + sub, ok := op.cases[v] + if !ok { + if op.def == nil { + return nil, ErrUnhandledValueIn.New(v, op.cases) + } + sub = op.def + } + return sub, nil +} + +func (op opLookupOp) Check(st *State, n nodes.Node) (bool, error) { + sub, err := op.eval(st) + if err != nil { + return false, err + } + return sub.Check(st, n) +} + +func (op opLookupOp) Construct(st *State, n nodes.Node) (nodes.Node, error) { + sub, err := op.eval(st) + if err != nil { + return nil, err + } + return sub.Construct(st, n) +} + +// ValueFunc is a function that transforms values. +type ValueFunc func(nodes.Value) (nodes.Value, error) + +// ValueConv converts a value with a provided function and passes it to sub-operation. +func ValueConv(on Op, conv, rev ValueFunc) Op { + return valueConvKind(on, nodes.KindsValues, conv, rev) +} + +func valueConvKind(on Op, kinds nodes.Kind, conv, rev ValueFunc) Op { + return &opValueConv{op: on, kinds: kinds & nodes.KindsValues, conv: conv, rev: rev} +} + +// StringFunc is a function that transforms string values. +type StringFunc func(string) (string, error) + +// StringConv is like ValueConv, but only processes string arguments. +func StringConv(on Op, conv, rev StringFunc) Op { + apply := func(fnc StringFunc) ValueFunc { + return func(v nodes.Value) (nodes.Value, error) { + sv, ok := v.(nodes.String) + if !ok { + return nil, ErrUnexpectedType.New(nodes.String(""), v) + } + s, err := fnc(string(sv)) + if err != nil { + return nil, err + } + return nodes.String(s), nil + } + } + return valueConvKind(on, nodes.KindString, apply(conv), apply(rev)) +} + +type opValueConv struct { + op Op + kinds nodes.Kind + conv, rev ValueFunc +} + +func (op *opValueConv) Kinds() nodes.Kind { + return op.kinds +} + +func (op *opValueConv) Check(st *State, n nodes.Node) (bool, error) { + v, ok := n.(nodes.Value) + if !ok { + return false, nil + } + nv, err := op.conv(v) + if ErrUnexpectedType.Is(err) { + return false, nil // skip type mismatch errors on check + } else if err != nil { + return false, err + } + return op.op.Check(st, nv) +} + +func (op opValueConv) Construct(st *State, n nodes.Node) (nodes.Node, error) { + n, err := op.op.Construct(st, n) + if err != nil { + return nil, err + } + v, ok := n.(nodes.Value) + if !ok { + return nil, ErrExpectedValue.New(n) + } + nv, err := op.rev(v) + if err != nil { + return nil, err + } + return nv, nil +} + +// If checks if a named variable value is true and executes one of sub-operations. 
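StringConv might be used like this to normalize a string on Check and leave it untouched on Construct (a sketch; transformer package scope plus the standard strings package are assumed):

func exampleStringConv() {
	lower := StringConv(Var("name"),
		func(s string) (string, error) { return strings.ToLower(s), nil },
		// the reverse conversion here is the identity, so the original
		// casing is not recovered
		func(s string) (string, error) { return s, nil },
	)
	_ = lower
}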
+func If(cond string, then, els Op) Op { + return &opIf{cond: cond, then: then, els: els} +} + +type opIf struct { + cond string + then, els Op +} + +func (op *opIf) Kinds() nodes.Kind { + return op.then.Kinds() | op.els.Kinds() +} + +func (op *opIf) Check(st *State, n nodes.Node) (bool, error) { + st1 := st.Clone() + ok1, err1 := op.then.Check(st1, n) + if ok1 && err1 == nil { + st.ApplyFrom(st1) + st.SetVar(op.cond, nodes.Bool(true)) + return true, nil + } + st2 := st.Clone() + ok2, err2 := op.els.Check(st2, n) + if ok2 && err2 == nil { + st.ApplyFrom(st2) + st.SetVar(op.cond, nodes.Bool(false)) + return true, nil + } + err := err1 + if err == nil { + err = err2 + } + return false, err +} + +func (op *opIf) Construct(st *State, n nodes.Node) (nodes.Node, error) { + vn, err := st.MustGetVar(op.cond) + if err != nil { + return nil, err + } + cond, ok := vn.(nodes.Bool) + if !ok { + return nil, ErrUnexpectedType.New(nodes.Bool(false), vn) + } + if cond { + return op.then.Construct(st, n) + } + return op.els.Construct(st, n) +} + +// NotEmpty checks that node is not nil and contains one or more fields or elements. +func NotEmpty(op Op) Op { + return &opNotEmpty{op: op} +} + +type opNotEmpty struct { + op Op +} + +func (*opNotEmpty) Kinds() nodes.Kind { + return nodes.KindsNotNil +} + +func (op *opNotEmpty) Check(st *State, n nodes.Node) (bool, error) { + switch n := n.(type) { + case nil: + return filtered("empty value %T for %v", n, op) + case nodes.Array: + if len(n) == 0 { + return filtered("empty value %T for %v", n, op) + } + case nodes.Object: + if len(n) == 0 { + return filtered("empty value %T for %v", n, op) + } + } + return op.op.Check(st, n) +} + +func (op *opNotEmpty) Construct(st *State, n nodes.Node) (nodes.Node, error) { + n, err := op.op.Construct(st, n) + if err != nil { + return nil, err + } + switch n := n.(type) { + case nil: + return nil, ErrUnexpectedValue.New(n) + case nodes.Array: + if len(n) == 0 { + return nil, ErrUnexpectedValue.New(n) + } + case nodes.Object: + if len(n) == 0 { + return nil, ErrUnexpectedValue.New(n) + } + } + return n, nil +} + +// Opt is an optional operation that uses a named variable to store the state. +func Opt(exists string, op Op) Op { + return &opOptional{vr: exists, op: op} +} + +type opOptional struct { + vr string + op Op +} + +func (op *opOptional) Kinds() nodes.Kind { + return nodes.KindNil | op.op.Kinds() +} + +func (op *opOptional) Check(st *State, n nodes.Node) (bool, error) { + if err := st.SetVar(op.vr, nodes.Bool(n != nil)); err != nil { + return false, err + } + if n == nil { + return true, nil + } + return op.op.Check(st, n) +} + +func (op *opOptional) Construct(st *State, n nodes.Node) (nodes.Node, error) { + vn, err := st.MustGetVar(op.vr) + if err != nil { + return nil, err + } + exists, ok := vn.(nodes.Bool) + if !ok { + return nil, ErrUnexpectedType.New(nodes.Bool(false), vn) + } + if !exists { + return nil, nil + } + return op.op.Construct(st, n) +} + +// Check tests first check-only operation before applying the main op. It won't use the check-only argument for Construct. +// The check-only operation will not be able to set any variables or change state by other means. 
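Opt and If might be combined like this for a nullable child that changes the shape of the result (a sketch with hypothetical names; transformer package scope assumed):

func exampleOptIf() {
	// Opt records in "has_init" whether the child existed; the wrapped
	// op only runs when the node is not nil.
	init := Opt("has_init", Var("init"))
	// If remembers the matching branch in "is_ptr" and replays it on Construct.
	typ := If("is_ptr",
		Obj{uast.KeyType: String("example:Pointer"), "Elem": Var("t")},
		Var("t"),
	)
	_, _ = init, typ
}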
+func Check(s Sel, op Op) Op { + return &opCheck{sel: s, op: op} +} + +type opCheck struct { + sel Sel + op Op +} + +func (op *opCheck) Kinds() nodes.Kind { + return op.sel.Kinds() & op.op.Kinds() +} + +func (op *opCheck) Check(st *State, n nodes.Node) (bool, error) { + if ok, err := op.sel.Check(st.Clone(), n); err != nil || !ok { + return ok, err + } + return op.op.Check(st, n) +} + +func (op *opCheck) Construct(st *State, n nodes.Node) (nodes.Node, error) { + return op.op.Construct(st, n) +} + +// Not negates the check. +func Not(s Sel) Sel { + return &opNot{sel: s} +} + +type opNot struct { + sel Sel +} + +func (*opNot) Kinds() nodes.Kind { + return nodes.KindsAny // can't be sure +} + +func (op *opNot) Check(st *State, n nodes.Node) (bool, error) { + ok, err := op.sel.Check(st.Clone(), n) + if err != nil { + return false, err + } + return !ok, nil +} + +// Not nil is a condition that ensures that node is not nil. +func NotNil() Sel { + return Not(Is(nil)) +} + +// And serves as a logical And operation for conditions. +func And(sels ...Sel) Sel { + return opAnd(sels) +} + +type opAnd []Sel + +func (op opAnd) Kinds() nodes.Kind { + var k nodes.Kind + for _, s := range op { + k &= s.Kinds() + } + return k +} + +func (op opAnd) Check(st *State, n nodes.Node) (bool, error) { + for _, sub := range op { + if ok, err := sub.Check(st.Clone(), n); err != nil { + return false, err + } else if !ok { + return false, nil + } + } + return true, nil +} + +// Any check matches if any of list elements matches sub-check. +func Any(s Sel) Sel { + if s == nil { + s = Is(nil) + } + return &opAny{sel: s} +} + +type opAny struct { + sel Sel +} + +func (*opAny) Kinds() nodes.Kind { + return nodes.KindArray +} + +func (op *opAny) Check(st *State, n nodes.Node) (bool, error) { + l, ok := n.(nodes.Array) + if !ok { + return false, nil + } + for _, o := range l { + if ok, err := op.sel.Check(st.Clone(), o); err != nil { + return false, err + } else if ok { + return true, nil + } + } + return false, nil +} + +// All check matches if all list elements matches sub-check. +func All(s Sel) Sel { + return &opAll{sel: s} +} + +type opAll struct { + sel Sel +} + +func (*opAll) Kinds() nodes.Kind { + return nodes.KindArray +} + +func (op *opAll) Check(st *State, n nodes.Node) (bool, error) { + l, ok := n.(nodes.Array) + if !ok { + return false, nil + } + for _, o := range l { + if ok, err := op.sel.Check(st.Clone(), o); err != nil || !ok { + return false, err + } + } + return true, nil +} + +var _ Sel = Has{} + +func HasType(o interface{}) Sel { + var typ string + if s, ok := o.(string); ok { + typ = s + } else { + typ = uast.TypeOf(o) + } + return Has{uast.KeyType: String(typ)} +} + +// Has is a check-only operation that verifies that object has specific fields and they match given checks. +type Has map[string]Sel + +func (Has) Kinds() nodes.Kind { + return nodes.KindObject +} + +// Check verifies that specified fields exists and matches the provided sub-operations. +func (m Has) Check(st *State, n nodes.Node) (bool, error) { + o, ok := n.(nodes.Object) + if !ok { + return false, nil + } + for k, sel := range m { + v, ok := o[k] + if !ok { + return false, nil + } + if ok, err := sel.Check(st.Clone(), v); err != nil || !ok { + return false, err + } + } + return true, nil +} + +// In check that the node is a value from a given list. 
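The selectors above compose under Check, which runs them on a cloned state before the real op; a sketch (hypothetical type name, transformer package scope assumed):

func exampleCheck() {
	op := Check(
		// match only objects of the given type whose "Nodes" field is set
		And(HasType("example:FunctionGroup"), Has{"Nodes": NotNil()}),
		Var("group"),
	)
	_ = op
}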
+func In(vals ...nodes.Value) Sel { + m := make(map[nodes.Value]struct{}, len(vals)) + for _, v := range vals { + m[v] = struct{}{} + } + return &opIn{m: m} +} + +type opIn struct { + m map[nodes.Value]struct{} +} + +func (op *opIn) Kinds() nodes.Kind { + var k nodes.Kind + for v := range op.m { + k |= nodes.KindOf(v) + } + return k +} + +func (op *opIn) Check(st *State, n nodes.Node) (bool, error) { + v, ok := n.(nodes.Value) + if !ok && n != nil { + return false, nil + } + _, ok = op.m[v] + return ok, nil +} + +func Cases(vr string, cases ...Op) Op { + return &opCases{vr: vr, cases: cases} +} + +func CasesObj(vr string, common ObjectOp, cases ObjectOps) ObjectOp { + list := cases.ObjectOps() + if len(list) == 0 { + panic("no cases") + } + var fields FieldDescs + for i, c := range list { + // do not allow partial or optional - it might indicate a mistake + arr, ok := c.Fields() + if !ok { + panic("partial transforms are not allowed in Cases") + } + for _, f := range arr { + if f.Optional { + panic("optional fields are not allowed in Cases") + } + } + if i == 0 { + // use as a baseline wipe all specific constraints (might differ in cases) + fields = make(FieldDescs, len(arr)) + for k := range arr { + fields[k] = FieldDesc{Optional: false} + } + continue + } + // check that all other cases mention the case set of fields + if len(arr) != len(fields) { + panic("all cases should have the same number of fields") + } + for k := range arr { + if _, ok := fields[k]; !ok { + panic(fmt.Errorf("field %s does not exists in case %d", k, i)) + } + } + } + var op ObjectOp = &opObjCases{vr: vr, cases: list, fields: fields} + if common != nil { + op = JoinObj(common, op) + } + return op +} + +type opCases struct { + vr string + cases []Op +} + +func (op *opCases) Kinds() nodes.Kind { + var k nodes.Kind + for _, s := range op.cases { + k |= s.Kinds() + } + return k +} + +func (op *opCases) Check(st *State, n nodes.Node) (bool, error) { + // find the first cases that matches and write an index to a variable + for i, s := range op.cases { + ls := st.Clone() + if ok, err := s.Check(ls, n); err != nil { + return false, err + } else if ok { + st.ApplyFrom(ls) + if err = st.SetVar(op.vr, nodes.Int(i)); err != nil { + return false, err + } + return true, nil + } + } + return false, nil +} + +func (op *opCases) Construct(st *State, n nodes.Node) (nodes.Node, error) { + // use the variable to decide what branch to take + v, err := st.MustGetVar(op.vr) + if err != nil { + return nil, err + } + i, ok := v.(nodes.Int) + if !ok || i < 0 || int(i) >= len(op.cases) { + return nil, ErrUnexpectedValue.New(v) + } + return op.cases[i].Construct(st, n) +} + +type opObjCases struct { + vr string + fields FieldDescs + cases []ObjectOp +} + +func (op *opObjCases) Fields() (FieldDescs, bool) { + return op.fields.Clone(), true +} + +func (op *opObjCases) Kinds() nodes.Kind { + var k nodes.Kind + for _, s := range op.cases { + k |= s.Kinds() + } + return k +} + +func (op *opObjCases) Check(st *State, n nodes.Node) (bool, error) { + return checkObj(op, st, n) +} + +func (op *opObjCases) Construct(st *State, n nodes.Node) (nodes.Node, error) { + return constructObj(op, st, n) +} + +func (op *opObjCases) CheckObj(st *State, n nodes.Object) (bool, error) { + // find the first cases that matches and write an index to a variable + for i, s := range op.cases { + ls := st.Clone() + if ok, err := s.CheckObj(ls, n); err != nil { + return false, err + } else if ok { + st.ApplyFrom(ls) + if err = st.SetVar(op.vr, nodes.Int(i)); err != nil { + 
return false, err + } + return true, nil + } + } + return false, nil +} + +func (op *opObjCases) ConstructObj(st *State, n nodes.Object) (nodes.Object, error) { + // use the variable to decide what branch to take + v, err := st.MustGetVar(op.vr) + if err != nil { + return nil, err + } + i, ok := v.(nodes.Int) + if !ok || i < 0 || int(i) >= len(op.cases) { + return nil, ErrUnexpectedValue.New(v) + } + return op.cases[i].ConstructObj(st, n) +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/semantic.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/semantic.go new file mode 100644 index 0000000..234c552 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/semantic.go @@ -0,0 +1,185 @@ +package transformer + +import ( + "fmt" + "strings" + "unicode" + + "gopkg.in/bblfsh/sdk.v2/uast" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" +) + +func uastType(uobj interface{}, op ObjectOp, part string) ObjectOp { + if op == nil { + op = Obj{} + } + utyp := uast.TypeOf(uobj) + if utyp == "" { + panic(fmt.Errorf("type is not registered: %T", uobj)) + } + obj := Obj{uast.KeyType: String(utyp)} + if part != "" { + return JoinObj(obj, Part(part, op)) + } + fields, ok := op.Fields() + if !ok { + return JoinObj(obj, op) + } + zero, opt := uast.NewObjectByTypeOpt(utyp) + delete(zero, uast.KeyType) + if len(zero) == 0 { + return JoinObj(obj, op) + } + for k := range fields { + if k == uast.KeyType { + continue + } + _, ok := zero[k] + _, ok2 := opt[k] + if !ok && !ok2 { + panic(ErrUndefinedField.New(utyp + "." + k)) + } + delete(zero, k) + } + for k, v := range zero { + obj[k] = Is(v) + } + return JoinObj(obj, op) +} + +func UASTType(uobj interface{}, op ObjectOp) ObjectOp { + return uastType(uobj, op, "") +} + +func UASTTypePart(vr string, uobj interface{}, op ObjectOp) ObjectOp { + return uastType(uobj, op, vr) +} + +func remapPos(m ObjMapping, names map[string]string) ObjMapping { + so, do := m.ObjMapping() // TODO: clone? + + sp := UASTType(uast.Positions{}, Fields{ + {Name: uast.KeyStart, Op: Var(uast.KeyStart), Optional: uast.KeyStart + "_exists"}, + {Name: uast.KeyEnd, Op: Var(uast.KeyEnd), Optional: uast.KeyEnd + "_exists"}, + }) + dp := UASTType(uast.Positions{}, Fields{ + {Name: uast.KeyStart, Op: Var(uast.KeyStart), Optional: uast.KeyStart + "_exists"}, + {Name: uast.KeyEnd, Op: Var(uast.KeyEnd), Optional: uast.KeyEnd + "_exists"}, + }) + if len(names) != 0 { + sa, da := make(Obj), make(Obj) + for k, v := range names { + sa[k] = Var(v) + if v != uast.KeyStart && v != uast.KeyEnd { + da[k] = Var(v) + } + } + sp, dp = JoinObj(sp, sa), JoinObj(dp, da) + } + return MapObj( + JoinObj(so, Obj{uast.KeyPos: sp}), + JoinObj(do, Obj{uast.KeyPos: dp}), + ) +} + +func MapSemantic(nativeType string, semType interface{}, m ObjMapping) ObjMapping { + return MapSemanticPos(nativeType, semType, nil, m) +} + +func MapSemanticPos(nativeType string, semType interface{}, pos map[string]string, m ObjMapping) ObjMapping { + so, do := m.ObjMapping() // TODO: clone? 
+ so = JoinObj(Obj{uast.KeyType: String(nativeType)}, so) + so, do = remapPos(MapObj(so, do), pos).ObjMapping() + return MapObj(so, UASTType(semType, do)) +} + +func CommentText(tokens [2]string, vr string) Op { + return commentUAST{ + tokens: tokens, + text: vr + "_text", pref: vr + "_pref", suff: vr + "_suff", tab: vr + "_tab", + } +} + +func CommentNode(block bool, vr string, pos Op) ObjectOp { + obj := Obj{ + "Block": Bool(block), + "Text": Var(vr + "_text"), + "Prefix": Var(vr + "_pref"), + "Suffix": Var(vr + "_suff"), + "Tab": Var(vr + "_tab"), + } + if pos != nil { + obj[uast.KeyPos] = pos + } + return UASTType(uast.Comment{}, obj) +} + +type commentUAST struct { + tokens [2]string + text string + pref, suff, tab string +} + +func (commentUAST) Kinds() nodes.Kind { + return nodes.KindString +} + +func (op commentUAST) Check(st *State, n nodes.Node) (bool, error) { + s, ok := n.(nodes.String) + if !ok { + return false, nil + } + text := string(s) + if !strings.HasPrefix(text, op.tokens[0]) || !strings.HasSuffix(text, op.tokens[1]) { + return false, nil + } + text = strings.TrimPrefix(text, op.tokens[0]) + text = strings.TrimSuffix(text, op.tokens[1]) + var ( + pref, suff, tab string + ) + + // find prefix + i := 0 + for ; i < len(text); i++ { + if r := rune(text[i]); unicode.IsLetter(r) || unicode.IsNumber(r) { + break + } + } + pref = text[:i] + text = text[i:] + + // find suffix + i = len(text) - 1 + for ; i >= 0 && unicode.IsSpace(rune(text[i])); i-- { + } + suff = text[i+1:] + text = text[:i+1] + + // TODO: set tab + + err := st.SetVars(Vars{ + op.text: nodes.String(text), + op.pref: nodes.String(pref), + op.suff: nodes.String(suff), + op.tab: nodes.String(tab), + }) + return err == nil, err +} + +func (op commentUAST) Construct(st *State, n nodes.Node) (nodes.Node, error) { + var ( + text, pref, suff, tab nodes.String + ) + + err := st.MustGetVars(VarsPtrs{ + op.text: &text, + op.pref: &pref, op.suff: &suff, op.tab: &tab, + }) + if err != nil { + return nil, err + } + // FIXME: handle tab + text = pref + text + suff + return nodes.String(op.tokens[0] + string(text) + op.tokens[1]), nil +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/transformer.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/transformer.go new file mode 100644 index 0000000..ee60574 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/transformer/transformer.go @@ -0,0 +1,513 @@ +package transformer + +import ( + "sort" + "strings" + + "gopkg.in/bblfsh/sdk.v2/uast" + "gopkg.in/bblfsh/sdk.v2/uast/nodes" +) + +const optimizeCheck = true + +// Transformer is an interface for transformations that operates on AST trees. +// An implementation is responsible for walking the tree and executing transformation on each AST node. +type Transformer interface { + Do(root nodes.Node) (nodes.Node, error) +} + +// CodeTransformer is a special case of Transformer that needs an original source code to operate. +type CodeTransformer interface { + OnCode(code string) Transformer +} + +// Sel is an operation that can verify if a specific node matches a set of constraints or not. +type Sel interface { + // Kinds returns a mask of all nodes kinds that this operation might match. + Kinds() nodes.Kind + // Check will verify constraints for a single node and returns true if an objects matches them. + // It can also populate the State with variables that can be used later to Construct a different object from the State. 
+ Check(st *State, n nodes.Node) (bool, error) +} + +// Mod is an operation that can reconstruct an AST node from a given State. +type Mod interface { + // Construct will use variables stored in State to reconstruct an AST node. + // Node that is provided as an argument may be used as a base for reconstruction. + Construct(st *State, n nodes.Node) (nodes.Node, error) +} + +// Op is a generic AST transformation step that describes a shape of an AST tree. +// It can be used to either check the constraints for a specific node and populate state, or to reconstruct an AST shape +// from a the same state (probably produced by another Op). +type Op interface { + Sel + Mod +} + +// Transformers appends all provided transformer slices into single one. +func Transformers(arr ...[]Transformer) []Transformer { + var out []Transformer + for _, a := range arr { + out = append(out, a...) + } + return out +} + +var _ Transformer = (TransformFunc)(nil) + +// TransformFunc is a function that will be applied to each AST node to transform the tree. +// It returns a new AST and true if tree was changed, or an old node and false if no modifications were done. +// The the tree will be traversed automatically and the callback will be called for each node. +type TransformFunc func(n nodes.Node) (nodes.Node, bool, error) + +// Do runs a transformation function for each AST node. +func (f TransformFunc) Do(n nodes.Node) (nodes.Node, error) { + var last error + nn, ok := nodes.Apply(n, func(n nodes.Node) (nodes.Node, bool) { + nn, ok, err := f(n) + if err != nil { + last = err + return n, false + } else if !ok { + return n, false + } + return nn, ok + }) + if ok { + return nn, last + } + return n, last +} + +var _ Transformer = (TransformObjFunc)(nil) + +// TransformObjFunc is like TransformFunc, but only matches Object nodes. +type TransformObjFunc func(n nodes.Object) (nodes.Object, bool, error) + +// Func converts this TransformObjFunc to a regular TransformFunc by skipping all non-object nodes. +func (f TransformObjFunc) Func() TransformFunc { + return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) { + obj, ok := n.(nodes.Object) + if !ok { + return n, false, nil + } + nn, ok, err := f(obj) + if err != nil { + return n, false, err + } else if !ok { + return n, false, nil + } + return nn, ok, nil + }) +} + +// Do runs a transformation function for each AST node. +func (f TransformObjFunc) Do(n nodes.Node) (nodes.Node, error) { + return f.Func().Do(n) +} + +// Map creates a two-way mapping between two transform operations. +// The first operation will be used to check constraints for each node and store state, while the second one will use +// the state to construct a new tree. 
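A TransformFunc sketch that rewrites every @token it encounters (illustrative only; the nodes, uast and strings imports are assumed, as in this package):

func exampleTransformFunc() Transformer {
	return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) {
		obj, ok := n.(nodes.Object)
		if !ok {
			return n, false, nil
		}
		tok, ok := obj[uast.KeyToken].(nodes.String)
		if !ok {
			return n, false, nil
		}
		obj = obj.CloneObject()
		obj[uast.KeyToken] = nodes.String(strings.ToUpper(string(tok)))
		return obj, true, nil
	})
}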
+func Map(src, dst Op) Mapping { + return mapping{src: src, dst: dst} +} + +func MapObj(src, dst ObjectOp) ObjMapping { + return objMapping{src: src, dst: dst} +} + +func MapPart(vr string, m ObjMapping) ObjMapping { + src, dst := m.ObjMapping() + _, sok := src.Fields() + _, dok := dst.Fields() + if !sok && !dok { + // both contain partial op, ignore current label + return MapObj(src, dst) + } else if sok != dok { + panic("inconsistent use of Part") + } + return MapObj(Part(vr, src), Part(vr, dst)) +} + +func Identity(op Op) Mapping { + return Map(op, op) +} + +type Mapping interface { + Mapping() (src, dst Op) +} + +type ObjMapping interface { + Mapping + ObjMapping() (src, dst ObjectOp) +} + +type MappingOp interface { + Op + Mapping +} + +type mapping struct { + src, dst Op +} + +func (m mapping) Mapping() (src, dst Op) { + return m.src, m.dst +} + +type objMapping struct { + src, dst ObjectOp +} + +func (m objMapping) Mapping() (src, dst Op) { + return m.src, m.dst +} + +func (m objMapping) ObjMapping() (src, dst ObjectOp) { + return m.src, m.dst +} + +// Reverse changes a transformation direction, allowing to construct the source tree. +func Reverse(m Mapping) Mapping { + src, dst := m.Mapping() + return Map(dst, src) +} + +func (m mapping) apply(root nodes.Node) (nodes.Node, error) { + src, dst := m.src, m.dst + var errs []error + _, objOp := src.(ObjectOp) + _, arrOp := src.(ArrayOp) + st := NewState() + nn, ok := nodes.Apply(root, func(n nodes.Node) (nodes.Node, bool) { + if n != nil { + if objOp { + if _, ok := n.(nodes.Object); !ok { + return n, false + } + } else if arrOp { + if _, ok := n.(nodes.Array); !ok { + return n, false + } + } + } + st.Reset() + if ok, err := src.Check(st, n); err != nil { + errs = append(errs, errCheck.Wrap(err)) + return n, false + } else if !ok { + return n, false + } + nn, err := dst.Construct(st, nil) + if err != nil { + errs = append(errs, errConstruct.Wrap(err)) + return n, false + } + return nn, true + }) + err := NewMultiError(errs...) + if ok { + return nn, err + } + return root, err +} + +// Mappings takes multiple mappings and optimizes the process of applying them as a single transformation. 
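Putting the pieces together, a mapping might be built and applied roughly like this (a sketch with hypothetical native type names; transformer package scope assumed):

func exampleMappings(root nodes.Node) (nodes.Node, error) {
	m := MapObj(
		Obj{uast.KeyType: String("native:Ident"), "Value": Var("name")},
		Obj{uast.KeyType: String("uast:Identifier"), "Name": Var("name")},
	)
	// Mappings indexes the mapping by node kind and @type, then applies
	// it to every node of the tree in a single pass.
	return Mappings(m).Do(root)
}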
+func Mappings(maps ...Mapping) Transformer { + if len(maps) == 0 { + return mappings{} + } + mp := mappings{ + all: maps, + } + if optimizeCheck { + mp.byKind = make(map[nodes.Kind][]Mapping) + mp.index() + } + return mp +} + +type mappings struct { + all []Mapping + + // indexed mappings + + byKind map[nodes.Kind][]Mapping // mappings applied to specific node kind + + typedObj map[string][]Mapping // mappings for objects with specific type + typedAny []Mapping // mappings for any typed object (operations that does not mention the type) +} + +func (m *mappings) index() { + precompile := func(m Mapping) Mapping { + return Map(m.Mapping()) + } + type ordered struct { + ind int + mp Mapping + } + var typedAny []ordered + typed := make(map[string][]ordered) + for i, mp := range m.all { + // pre-compile object operations (sort fields for unordered ops, etc) + mp = precompile(mp) + + oop, _ := mp.Mapping() + if chk, ok := oop.(*opCheck); ok { + oop = chk.op + } + // switch by operation type and make a separate list + // next time we will see a node with matching type, we will apply only specific ops + for _, k := range oop.Kinds().Split() { + m.byKind[k] = append(m.byKind[k], mp) + } + switch op := oop.(type) { + case ObjectOp: + specific := false + fields, _ := op.Fields() + if f, ok := fields[uast.KeyType]; ok && !f.Optional { + if f.Fixed != nil { + typ := *f.Fixed + if typ, ok := typ.(nodes.String); ok { + s := string(typ) + typed[s] = append(typed[s], ordered{ind: i, mp: mp}) + specific = true + } + } + } + if !specific { + typedAny = append(typedAny, ordered{ind: i, mp: mp}) + } + default: + // the type is unknown, thus we should try to apply it to objects and array as well + typedAny = append(typedAny, ordered{ind: i, mp: mp}) + } + } + m.typedObj = make(map[string][]Mapping, len(typed)) + for typ, ord := range typed { + ord = append(ord, typedAny...) + sort.Slice(ord, func(i, j int) bool { + return ord[i].ind < ord[j].ind + }) + maps := make([]Mapping, 0, len(ord)) + for _, o := range ord { + maps = append(maps, o.mp) + } + m.typedObj[typ] = maps + } +} + +func (m mappings) Do(root nodes.Node) (nodes.Node, error) { + var errs []error + st := NewState() + nn, ok := nodes.Apply(root, func(old nodes.Node) (nodes.Node, bool) { + var maps []Mapping + if !optimizeCheck { + maps = m.all + } else { + maps = m.byKind[nodes.KindOf(old)] + switch old := old.(type) { + case nodes.Object: + if typ, ok := old[uast.KeyType].(nodes.String); ok { + if mp, ok := m.typedObj[string(typ)]; ok { + maps = mp + } + } + } + } + + n := old + applied := false + for _, mp := range maps { + src, dst := mp.Mapping() + st.Reset() + if ok, err := src.Check(st, n); err != nil { + errs = append(errs, errCheck.Wrap(err)) + continue + } else if !ok { + continue + } + applied = true + + nn, err := dst.Construct(st, nil) + if err != nil { + errs = append(errs, errConstruct.Wrap(err)) + continue + } + n = nn + } + + if !applied { + return old, false + } + return n, true + }) + err := NewMultiError(errs...) + if ok { + return nn, err + } + return root, err +} + +// NewState creates a new state for Ops to work on. +// It stores variables, flags and anything that necessary +// for transformation steps to persist data. +func NewState() *State { + return &State{} +} + +// Vars is a set of variables with their values. +type Vars map[string]nodes.Node + +// State stores all variables (placeholder values, flags and wny other state) between Check and Construct steps. 
+type State struct { + vars Vars + states map[string][]*State +} + +// Reset clears the state and allows to reuse an object. +func (st *State) Reset() { + st.vars = nil + st.states = nil +} + +// Clone will return a copy of the State. This can be used to apply Check and throw away any variables produced by it. +// To merge a cloned state back use ApplyFrom on a parent state. +func (st *State) Clone() *State { + st2 := NewState() + if len(st.vars) != 0 { + st2.vars = make(Vars) + } + for k, v := range st.vars { + st2.vars[k] = v + } + if len(st.states) != 0 { + st2.states = make(map[string][]*State) + } + for k, v := range st.states { + st2.states[k] = v + } + return st2 +} + +// ApplyFrom merges a provided state into this state object. +func (st *State) ApplyFrom(st2 *State) { + if len(st2.vars) != 0 && st.vars == nil { + st.vars = make(Vars) + } + for k, v := range st2.vars { + if _, ok := st.vars[k]; !ok { + st.vars[k] = v + } + } + if len(st2.states) != 0 && st.states == nil { + st.states = make(map[string][]*State) + } + for k, v := range st2.states { + if _, ok := st.states[k]; !ok { + st.states[k] = v + } + } +} + +// GetVar looks up a named variable. +func (st *State) GetVar(name string) (nodes.Node, bool) { + n, ok := st.vars[name] + return n, ok +} + +// MustGetVar looks up a named variable and returns ErrVariableNotDefined in case it does not exists. +func (st *State) MustGetVar(name string) (nodes.Node, error) { + n, ok := st.GetVar(name) + if !ok { + return nil, ErrVariableNotDefined.New(name) + } + return n, nil +} + +// VarsPtrs is a set of variable pointers. +type VarsPtrs map[string]nodes.NodePtr + +// MustGetVars is like MustGetVar but fetches multiple variables in one operation. +func (st *State) MustGetVars(vars VarsPtrs) error { + for name, dst := range vars { + n, ok := st.GetVar(name) + if !ok { + return ErrVariableNotDefined.New(name) + } + if err := dst.SetNode(n); err != nil { + return err + } + } + return nil +} + +// SetVar sets a named variable. It will return ErrVariableRedeclared if a variable with the same name is already set. +// It will ignore the operation if variable already exists and has the same value (nodes.Value). +func (st *State) SetVar(name string, val nodes.Node) error { + cur, ok := st.vars[name] + if !ok { + // not declared + if st.vars == nil { + st.vars = make(Vars) + } + st.vars[name] = val + return nil + } + if nodes.Equal(cur, val) { + // already declared, and the same value is already in the map + return nil + } + return ErrVariableRedeclared.New(name, cur, val) +} + +// SetVars is like SetVar but sets multiple variables in one operation. +func (st *State) SetVars(vars Vars) error { + for k, v := range vars { + if err := st.SetVar(k, v); err != nil { + return err + } + } + return nil +} + +// GetStateVar returns a stored sub-state from a named variable. +func (st *State) GetStateVar(name string) ([]*State, bool) { + n, ok := st.states[name] + return n, ok +} + +// SetStateVar sets a sub-state variable. It returns ErrVariableRedeclared if the variable with this name already exists. +func (st *State) SetStateVar(name string, sub []*State) error { + cur, ok := st.states[name] + if ok { + return ErrVariableRedeclared.New(name, cur, sub) + } + if st.states == nil { + st.states = make(map[string][]*State) + } + st.states[name] = sub + return nil +} + +// DefaultNamespace is a transform that sets a specified namespace for predicates and values that doesn't have a namespace. 
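The State API might be exercised directly when writing a custom Op; a minimal sketch (transformer package scope assumed):

func exampleState() {
	st := NewState()
	// The first SetVar for a name succeeds; a later SetVar with a different
	// value for the same name returns ErrVariableRedeclared.
	if err := st.SetVar("name", nodes.String("x")); err != nil {
		panic(err)
	}
	sub := st.Clone() // Check steps usually run on a clone...
	st.ApplyFrom(sub) // ...and are merged back only if they succeed
	v, ok := st.GetVar("name")
	_, _ = v, ok
}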
+func DefaultNamespace(ns string) Transformer { + return TransformFunc(func(n nodes.Node) (nodes.Node, bool, error) { + obj, ok := n.(nodes.Object) + if !ok { + return n, false, nil + } + tp, ok := obj[uast.KeyType].(nodes.String) + if !ok { + return n, false, nil + } + if strings.Contains(string(tp), ":") { + return n, false, nil + } + obj = obj.CloneObject() + obj[uast.KeyType] = nodes.String(ns + ":" + string(tp)) + return obj, true, nil + }) +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/types.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/types.go new file mode 100644 index 0000000..ef9e4ea --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/types.go @@ -0,0 +1,498 @@ +package uast + +import ( + "fmt" + "reflect" + "strings" + + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/src-d/go-errors.v1" +) + +var ( + ErrIncorrectType = errors.NewKind("incorrect object type: %q, expected: %q") + ErrTypeNotRegistered = errors.NewKind("type is not registered: %q") +) + +var ( + namespaces = make(map[string]string) + package2ns = make(map[string]string) + type2name = make(map[reflect.Type]nodeID) + name2type = make(map[nodeID]reflect.Type) +) + +func parseNodeID(s string) nodeID { + i := strings.Index(s, ":") + if i < 0 { + return nodeID{Name: s} + } + return nodeID{ + NS: s[:i], Name: s[i+1:], + } +} + +type nodeID struct { + NS string + Name string +} + +func (n nodeID) IsValid() bool { + return n != (nodeID{}) +} +func (n nodeID) String() string { + if n.NS == "" { + return n.Name + } + return n.NS + ":" + n.Name +} + +func RegisterPackage(ns string, types ...interface{}) { + if _, ok := namespaces[ns]; ok { + panic("namespace already registered") + } else if len(types) == 0 { + panic("at least one type should be specified") + } + pkg := reflect.TypeOf(types[0]).PkgPath() + if _, ok := package2ns[pkg]; ok { + panic("package already registered") + } + namespaces[ns] = pkg + package2ns[pkg] = ns + + for _, o := range types { + rt := reflect.TypeOf(o) + if rt.Kind() == reflect.Ptr { + rt = rt.Elem() + } + if name, ok := type2name[rt]; ok { + panic(fmt.Errorf("type %v already registered under %s name", rt, name)) + } + name := nodeID{NS: ns, Name: rt.Name()} + type2name[rt] = name + name2type[name] = rt + } +} + +func LookupType(typ string) (reflect.Type, bool) { + name := parseNodeID(typ) + rt, ok := name2type[name] + return rt, ok +} + +func zeroFieldsTo(obj, opt nodes.Object, rt reflect.Type) error { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + if f.Anonymous { + if err := zeroFieldsTo(obj, opt, f.Type); err != nil { + return err + } + continue + } + name, omit, err := fieldName(f) + if err != nil { + return err + } + var v nodes.Node + switch f.Type.Kind() { + case reflect.String: + v = nodes.String("") + case reflect.Bool: + v = nodes.Bool(false) + case reflect.Float32, reflect.Float64: + v = nodes.Float(0) + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + v = nodes.Int(0) + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: + v = nodes.Uint(0) + } + if omit { + opt[name] = v + } else { + obj[name] = v + } + } + return nil +} + +func NewObjectByType(typ string) nodes.Object { + obj, _ := NewObjectByTypeOpt(typ) + return obj +} + +func NewObjectByTypeOpt(typ string) (obj, opt nodes.Object) { + name := parseNodeID(typ) + rt, ok := name2type[name] + if !ok { + return nil, nil + } + obj = make(nodes.Object) + opt = make(nodes.Object) + obj[KeyType] = nodes.String(typ) + switch rt.Kind() { + case reflect.Map: + // do 
nothing + default: + if err := zeroFieldsTo(obj, opt, rt); err != nil { + panic(err) + } + } + return obj, opt +} + +func NewValue(typ string) (reflect.Value, error) { + rt, ok := LookupType(typ) + if !ok { + return reflect.Value{}, ErrTypeNotRegistered.New(typ) + } + switch rt.Kind() { + case reflect.Map: + return reflect.MakeMap(rt), nil + default: + return reflect.New(rt).Elem(), nil + } +} + +func TypeOf(o interface{}) string { + switch obj := o.(type) { + case nil: + return "" + case nodes.Object: + tp, _ := obj[KeyType].(nodes.String) + return string(tp) + case nodes.ExternalObject: + v, _ := obj.ValueAt(KeyType) + if v == nil { + return "" + } + tp, _ := v.Value().(nodes.String) + return string(tp) + case nodes.Node, nodes.External: + // other generic nodes cannot store type + return "" + } + tp := reflect.TypeOf(o) + return typeOf(tp).String() +} + +func typeOf(tp reflect.Type) nodeID { + if name, ok := type2name[tp]; ok { + return name + } + pkg := tp.PkgPath() + if pkg == "" { + return nodeID{} + } + name := tp.Name() + if name == "" { + return nodeID{Name: name} + } + ns := package2ns[pkg] + return nodeID{NS: ns, Name: name} +} + +func fieldName(f reflect.StructField) (string, bool, error) { + name := strings.SplitN(f.Tag.Get("uast"), ",", 2)[0] + omitempty := false + if name == "" { + tags := strings.Split(f.Tag.Get("json"), ",") + for _, s := range tags[1:] { + if s == "omitempty" { + omitempty = true + break + } + } + name = tags[0] + } + if name == "" { + return "", false, fmt.Errorf("field %s should have uast or json name", f.Name) + } + return name, omitempty, nil +} + +var ( + reflString = reflect.TypeOf("") + reflAny = reflect.TypeOf((*Any)(nil)).Elem() + reflNode = reflect.TypeOf((*nodes.Node)(nil)).Elem() + reflNodeExt = reflect.TypeOf((*nodes.External)(nil)).Elem() +) + +// ToNode converts objects returned by schema-less encodings such as JSON to Node objects. +// It also supports types from packages registered via RegisterPackage. 
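A sketch of how a package of node types might be registered and inspected (hypothetical namespace and type; inside the uast package these helpers are called directly, external callers would use the uast. prefix):

type exampleIdent struct {
	Name string `json:"Name"`
}

func registerExample() {
	// Registers the type under the "example" namespace, so its canonical
	// name becomes "example:exampleIdent".
	RegisterPackage("example", exampleIdent{})
	_ = TypeOf(exampleIdent{})                  // "example:exampleIdent"
	_ = NewObjectByType("example:exampleIdent") // object with "@type" and a zero "Name"
}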
+func ToNode(o interface{}) (nodes.Node, error) { + return nodes.ToNode(o, toNodeFallback) +} + +func toNodeFallback(o interface{}) (nodes.Node, error) { + return toNodeReflect(reflect.ValueOf(o)) +} + +func toNodeReflect(rv reflect.Value) (nodes.Node, error) { + rt := rv.Type() + for rt.Kind() == reflect.Interface || rt.Kind() == reflect.Ptr { + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + rt = rv.Type() + } + if rt.ConvertibleTo(reflNode) { + return rv.Interface().(nodes.Node), nil + } else if rt.ConvertibleTo(reflNodeExt) { + n := rv.Interface().(nodes.External) + return nodes.ToNode(n, toNodeFallback) + } + switch rt.Kind() { + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + return nodes.Int(rv.Int()), nil + case reflect.Uint, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8: + return nodes.Uint(rv.Uint()), nil + case reflect.Float64, reflect.Float32: + return nodes.Float(rv.Float()), nil + case reflect.Bool: + return nodes.Bool(rv.Bool()), nil + case reflect.String: + return nodes.String(rv.String()), nil + case reflect.Slice: + // TODO: catch []byte + arr := make(nodes.Array, 0, rv.Len()) + for i := 0; i < rv.Len(); i++ { + v, err := toNodeReflect(rv.Index(i)) + if err != nil { + return nil, err + } + arr = append(arr, v) + } + return arr, nil + case reflect.Struct, reflect.Map: + name := typeOf(rt) + if name.NS == "" { + return nil, fmt.Errorf("type %v is not registered", rt) + } + typ := name.String() + + isStruct := rt.Kind() == reflect.Struct + + sz := 0 + if isStruct { + sz = rt.NumField() + } else { + sz = rv.Len() + } + + obj := make(nodes.Object, sz+1) + obj[KeyType] = nodes.String(typ) + + if isStruct { + if err := structToNode(obj, rv, rt); err != nil { + return nil, err + } + } else { + if rt.Key() != reflString { + return nil, fmt.Errorf("unsupported map key type: %v", rt.Key()) + } + for _, k := range rv.MapKeys() { + v, err := toNodeReflect(rv.MapIndex(k)) + if err != nil { + return nil, err + } + obj[k.String()] = v + } + } + return obj, nil + } + return nil, fmt.Errorf("unsupported type: %v", rt) +} + +func structToNode(obj nodes.Object, rv reflect.Value, rt reflect.Type) error { + for i := 0; i < rt.NumField(); i++ { + f := rv.Field(i) + if !f.CanInterface() { + continue + } + ft := rt.Field(i) + if ft.Anonymous { + if err := structToNode(obj, f, ft.Type); err != nil { + return err + } + continue + } + name, omit, err := fieldName(ft) + if err != nil { + return fmt.Errorf("type %s: %v", rt.Name(), err) + } + v, err := toNodeReflect(f) + if err != nil { + return err + } + if v == nil && omit { + continue + } + obj[name] = v + } + return nil +} + +func NodeAs(n nodes.External, dst interface{}) error { + var rv reflect.Value + if v, ok := dst.(reflect.Value); ok { + rv = v + } else { + rv = reflect.ValueOf(dst) + } + return nodeAs(n, rv) +} + +func setAnyOrNode(dst reflect.Value, n nodes.External) (bool, error) { + rt := dst.Type() + if rt == reflAny || rt == reflNodeExt { + dst.Set(reflect.ValueOf(n)) + return true, nil + } else if rt == reflNode { + nd, err := nodes.ToNode(n, nil) + if err != nil { + return false, err + } + dst.Set(reflect.ValueOf(nd)) + return true, nil + } + return false, nil +} + +func nodeAs(n nodes.External, rv reflect.Value) error { + orv := rv + if rv.Kind() == reflect.Ptr { + if rv.CanSet() && rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + if !rv.CanSet() && rv.Kind() != reflect.Map { + if !rv.IsValid() { + return fmt.Errorf("invalid value: %#v", orv) + } 
+ return fmt.Errorf("argument should be a pointer: %v", rv.Type()) + } + if n == nil { + return nil + } + switch kind := n.Kind(); kind { + case nodes.KindNil: + return nil + case nodes.KindObject: + obj, ok := n.(nodes.ExternalObject) + if !ok { + return fmt.Errorf("external node has an object kind, but does not implement an interface: %T", n) + } + if ok, err := setAnyOrNode(rv, n); err != nil { + return err + } else if ok { + return nil + } + rt := rv.Type() + switch kind := rt.Kind(); kind { + case reflect.Struct, reflect.Map: + name := typeOf(rt) + etyp := name.String() + if typ := TypeOf(n); typ != etyp { + return ErrIncorrectType.New(typ, etyp) + } + if kind == reflect.Struct { + if err := nodeToStruct(rv, rt, obj); err != nil { + return err + } + } else { + if rv.IsNil() { + rv.Set(reflect.MakeMapWithSize(rt, obj.Size()-1)) + } + for _, k := range obj.Keys() { + if k == KeyType { + continue + } + v, _ := obj.ValueAt(k) + nv := reflect.New(rt.Elem()).Elem() + if err := nodeAs(v, nv); err != nil { + return err + } + rv.SetMapIndex(reflect.ValueOf(k), nv) + } + } + case reflect.Interface: + if nv, err := NewValue(TypeOf(n)); err == nil { + return nodeAs(n, nv.Elem()) + } else if !reflect.TypeOf(n).ConvertibleTo(rt) { + return fmt.Errorf("cannot create interface value %v for %#v", rt, n) + } + // we cannot determine the type of an object at this level, so just set it as-is + rv.Set(reflect.ValueOf(n).Convert(rt)) + default: + return fmt.Errorf("object: expected struct, map or interface as a field type, got %v", rt) + } + return nil + case nodes.KindArray: + arr, ok := n.(nodes.ExternalArray) + if !ok { + return fmt.Errorf("external node has an array kind, but does not implement an interface: %T", n) + } + if ok, err := setAnyOrNode(rv, n); err != nil { + return err + } else if ok { + return nil + } + rt := rv.Type() + if rt.Kind() != reflect.Slice { + return fmt.Errorf("expected slice, got %v", rt) + } + sz := arr.Size() + if rv.Cap() < sz { + rv.Set(reflect.MakeSlice(rt, sz, sz)) + } else { + rv = rv.Slice(0, sz) + } + for i := 0; i < sz; i++ { + v := arr.ValueAt(i) + if err := nodeAs(v, rv.Index(i)); err != nil { + return err + } + } + return nil + default: + rt := rv.Type() + if rt == reflAny { + return fmt.Errorf("expected UAST node, got: %T", n) + } + nv := reflect.ValueOf(n.Value()) + if !nv.Type().ConvertibleTo(rt) { + return fmt.Errorf("cannot convert %T to %v", n, rt) + } + rv.Set(nv.Convert(rt)) + return nil + } +} + +func nodeToStruct(rv reflect.Value, rt reflect.Type, obj nodes.ExternalObject) error { + for i := 0; i < rt.NumField(); i++ { + f := rv.Field(i) + if !f.CanInterface() { + continue + } + ft := rt.Field(i) + if ft.Anonymous { + if err := nodeToStruct(f, ft.Type, obj); err != nil { + return err + } + continue + } + name, _, err := fieldName(ft) + if err != nil { + return fmt.Errorf("type %s: %v", rt.Name(), err) + } + v, ok := obj.ValueAt(name) + if !ok { + continue + } + if err = nodeAs(v, f); err != nil { + return err + } + } + return nil +} diff --git a/vendor/gopkg.in/bblfsh/sdk.v2/uast/uast.go b/vendor/gopkg.in/bblfsh/sdk.v2/uast/uast.go new file mode 100644 index 0000000..e51d973 --- /dev/null +++ b/vendor/gopkg.in/bblfsh/sdk.v2/uast/uast.go @@ -0,0 +1,347 @@ +// Copyright 2017 Sourced Technologies SL +// Licensed under the Apache License, Version 2.0 (the "License"); you may not +// use this file except in compliance with the License. 
You may obtain a copy +// of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations under +// the License. + +// Package uast defines a UAST (Universal Abstract Syntax Tree) representation +// and operations to manipulate them. +package uast + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "gopkg.in/bblfsh/sdk.v2/uast/nodes" + "gopkg.in/bblfsh/sdk.v2/uast/role" +) + +func init() { + // Register all types from the package under this namespace + RegisterPackage(NS, + Position{}, + Positions{}, + GenNode{}, + Identifier{}, + String{}, + QualifiedIdentifier{}, + Comment{}, + Group{}, + FunctionGroup{}, + Block{}, + Alias{}, + Import{}, + RuntimeImport{}, + RuntimeReImport{}, + Argument{}, + FunctionType{}, + Function{}, + ) +} + +// Special field keys for nodes.Object +const ( + KeyType = "@type" // InternalType + KeyToken = "@token" // Token + KeyRoles = "@role" // Roles, for representations see RoleList + KeyPos = "@pos" // All positional information is stored in this field +) + +const ( + // NS is a namespace for the UAST types. + NS = "uast" + + // TypePosition is a node type for positional information in AST. See AsPosition. + TypePosition = NS + ":Position" + // TypePositions is a node type for a root node of positional information in AST. See AsPositions. + TypePositions = NS + ":Positions" + // TypeOperator is a node type for an operator AST node. See Operator. + TypeOperator = NS + ":Operator" + // KeyPosOff is a name for a Position object field that stores a bytes offset. + KeyPosOff = "offset" + // KeyPosLine is a name for a Position object field that stores a source line. + KeyPosLine = "line" + // KeyPosCol is a name for a Position object field that stores a source column. + KeyPosCol = "col" + + KeyStart = "start" // StartPosition + KeyEnd = "end" // EndPosition +) + +// Position represents a position in a source code file. +type Position struct { + // Offset is the position as an absolute byte offset. It is a 0-based index. + Offset uint32 `json:"offset"` + // Line is the line number. It is a 1-based index. + Line uint32 `json:"line"` + // Col is the column number (the byte offset of the position relative to + // a line. It is a 1-based index. + Col uint32 `json:"col"` +} + +func (p Position) HasOffset() bool { + return p.Offset != 0 || (p.Line == 1 && p.Col == 1) +} + +func (p Position) HasLineCol() bool { + return p.Line != 0 && p.Col != 0 +} + +func (p Position) Valid() bool { + return p != (Position{}) +} + +func (p Position) Less(p2 Position) bool { + if !p.Valid() { + return false + } else if !p2.Valid() { + return true + } + if p.HasOffset() && p2.HasOffset() { + return p.Offset < p2.Offset + } + if p.Line != p2.Line { + if p.Line != 0 && p2.Line != 0 { + return p.Line < p2.Line + } + return p.Line != 0 + } + return p.Col != 0 && p.Col < p2.Col +} + +// Positions is a container object that stores all positional information for a node. +type Positions map[string]Position + +// Keys returns a sorted slice of position names. +func (p Positions) Keys() []string { + arr := make([]string, 0, len(p)) + for k := range p { + arr = append(arr, k) + } + sort.Strings(arr) + return arr +} + +// Start returns a start position of the node. 
+func (p Positions) Start() *Position { + if p, ok := p[KeyStart]; ok { + return &p + } + return nil +} + +// End returns an end position of the node. +func (p Positions) End() *Position { + if p, ok := p[KeyEnd]; ok { + return &p + } + return nil +} + +// ToObject converts positions to a generic object. +func (p Positions) ToObject() nodes.Object { + n, err := toNodeReflect(reflect.ValueOf(p)) + if err != nil { + panic(err) + } + return n.(nodes.Object) +} + +// AsPosition transforms a generic AST node to a Position object. +func AsPosition(m nodes.Object) *Position { + if TypeOf(m) != TypePosition { + return nil + } + var p Position + if err := NodeAs(m, &p); err != nil { + panic(err) + } + return &p +} + +// PositionsOf returns an object with all positional information for a node. +func PositionsOf(m nodes.Object) Positions { + o, _ := m[KeyPos].(nodes.Object) + if len(o) == 0 { + return nil + } + ps := make(Positions, len(o)) + for k, v := range o { + po, _ := v.(nodes.Object) + if p := AsPosition(po); p != nil { + ps[k] = *p + } + } + return ps +} + +// ToObject converts Position to a generic AST node. +func (p Position) ToObject() nodes.Object { + n, err := toNodeReflect(reflect.ValueOf(&p)) + if err != nil { + panic(err) + } + return n.(nodes.Object) +} + +// RoleList converts a set of roles into a list node. +func RoleList(roles ...role.Role) nodes.Array { + arr := make(nodes.Array, 0, len(roles)) + for _, r := range roles { + arr = append(arr, nodes.String(r.String())) + } + return arr +} + +// RolesOf is a helper for getting node UAST roles (see KeyRoles). +func RolesOf(m nodes.Object) role.Roles { + arr, ok := m[KeyRoles].(nodes.Array) + if !ok || len(arr) == 0 { + if tp := TypeOf(m); tp == "" || strings.HasPrefix(tp, NS+":") { + return nil + } + return role.Roles{role.Unannotated} + } + out := make(role.Roles, 0, len(arr)) + for _, v := range arr { + if r, ok := v.(nodes.String); ok { + out = append(out, role.FromString(string(r))) + } + } + return out +} + +// TokenOf is a helper for getting node token (see KeyToken). +func TokenOf(m nodes.Object) string { + t := m[KeyToken] + s, ok := t.(nodes.String) + if ok { + return string(s) + } + v, _ := t.(nodes.Value) + if v != nil { + return fmt.Sprint(v) + } + return "" +} + +// Tokens collects all tokens of the tree recursively (pre-order). +func Tokens(n nodes.Node) []string { + var tokens []string + nodes.WalkPreOrder(n, func(n nodes.Node) bool { + if obj, ok := n.(nodes.Object); ok { + if tok := TokenOf(obj); tok != "" { + tokens = append(tokens, tok) + } + } + return true + }) + return tokens +} + +// HashNoPos hashes the node, but skips positional information. +func HashNoPos(n nodes.External) nodes.Hash { + h := nodes.NewHasher() + h.KeyFilter = func(key string) bool { + return key != KeyPos + } + return h.HashOf(n) +} + +// Any is an alias type for any UAST node. +type Any interface{} + +// Scope is a temporary definition of a scope semantic type. 
+type Scope = Any + +type GenNode struct { + Positions Positions `json:"@pos,omitempty"` +} + +type Identifier struct { + GenNode + Name string `json:"Name"` +} + +type String struct { + GenNode + Value string `json:"Value"` + Format string `json:"Format"` // TODO: make an enum later +} + +type QualifiedIdentifier struct { + GenNode + Names []Identifier `json:"Names"` +} + +type Comment struct { + GenNode + Text string `json:"Text"` + Prefix string `json:"Prefix"` + Suffix string `json:"Suffix"` + Tab string `json:"Tab"` + Block bool `json:"Block"` +} + +type Group struct { + GenNode + Nodes []Any `json:"Nodes"` +} + +type FunctionGroup Group + +type Block struct { + GenNode + Statements []Any `json:"Statements"` + // Scope *Scope +} + +type Alias struct { + GenNode + Name Identifier `json:"Name"` + Node Any `json:"Node"` + // Target *Scope +} + +type Import struct { + GenNode + Path Any `json:"Path"` + All bool `json:"All"` + Names []Any `json:"Names"` + Target Scope `json:"Target"` +} + +type RuntimeImport Import + +type RuntimeReImport RuntimeImport + +//type InlineImport Import + +type Argument struct { + GenNode + Name *Identifier `json:"Name"` + Type Any `json:"Type"` + Init Any `json:"Init"` + Variadic bool `json:"Variadic"` + MapVariadic bool `json:"MapVariadic"` + Receiver bool `json:"Receiver"` +} + +type FunctionType struct { + GenNode + Arguments []Argument `json:"Arguments"` + Returns []Argument `json:"Returns"` +} + +type Function struct { + GenNode + Type FunctionType `json:"Type"` + Body *Block `json:"Body"` +}
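
For reference, a minimal usage sketch of the reflection-based decoding vendored above (the exported NodeAs entry point driving nodeAs/nodeToStruct). It assumes the vendored packages import as gopkg.in/bblfsh/sdk.v2/uast and gopkg.in/bblfsh/sdk.v2/uast/nodes; the node literal and the "uast:Identifier" name are illustrative, following the RegisterPackage call, key constants, and struct tags in uast.go rather than any real driver output.

package main

import (
	"fmt"

	"gopkg.in/bblfsh/sdk.v2/uast"
	"gopkg.in/bblfsh/sdk.v2/uast/nodes"
)

func main() {
	// A generic object in the shape the decoder expects: @type names a type
	// registered under the "uast" namespace, the remaining keys match the
	// JSON tags of the corresponding struct fields.
	n := nodes.Object{
		uast.KeyType: nodes.String("uast:Identifier"),
		"Name":       nodes.String("println"),
	}

	// NodeAs fills the typed value through the nodeAs/nodeToStruct reflection path.
	var id uast.Identifier
	if err := uast.NodeAs(n, &id); err != nil {
		panic(err)
	}
	fmt.Println(id.Name) // println

	// Decoding into a type whose registered name does not match @type is
	// rejected (the ErrIncorrectType check in the object branch of nodeAs).
	var c uast.Comment
	fmt.Println(uast.NodeAs(n, &c) != nil) // true
}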
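
And a small sketch, under the same import-path assumptions, of the positional and token helpers defined in uast.go (Position.ToObject/AsPosition, PositionsOf, TokenOf, Tokens); the hand-built objects mirror the @pos/@token layout described by the key constants above and are illustrative only.

package main

import (
	"fmt"

	"gopkg.in/bblfsh/sdk.v2/uast"
	"gopkg.in/bblfsh/sdk.v2/uast/nodes"
)

func main() {
	// Round-trip a Position through its generic object form.
	obj := uast.Position{Offset: 7, Line: 2, Col: 3}.ToObject()
	if p := uast.AsPosition(obj); p != nil {
		fmt.Println(p.Line, p.Col) // 2 3
	}

	// A node carrying a token plus start/end positions under the @pos key.
	n := nodes.Object{
		uast.KeyType:  nodes.String(uast.TypeOperator),
		uast.KeyToken: nodes.String("+"),
		uast.KeyPos: nodes.Object{
			uast.KeyStart: uast.Position{Offset: 4, Line: 1, Col: 5}.ToObject(),
			uast.KeyEnd:   uast.Position{Offset: 5, Line: 1, Col: 6}.ToObject(),
		},
	}

	fmt.Println(uast.TokenOf(n)) // +
	if start := uast.PositionsOf(n).Start(); start != nil {
		fmt.Println(start.Offset, start.Line, start.Col) // 4 1 5
	}

	// Tokens walks the tree pre-order and collects every non-empty @token.
	tree := nodes.Array{n, nodes.Object{uast.KeyToken: nodes.String("x")}}
	fmt.Println(uast.Tokens(tree)) // [+ x]
}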