mirror of https://github.com/taigrr/wasm-experiments synced 2025-01-18 04:03:21 -08:00

Remove wasmgo example

Johan Brandhorst 2019-02-26 21:12:25 +00:00
parent 5bada70f17
commit 3f3a4ea966
823 changed files with 0 additions and 404952 deletions

Gopkg.lock generated

@@ -1,303 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:cbdefc58241a9baf1608b51d52fa11e9778ed0ae05cc1765643369a092b6aa3b"
name = "cloud.google.com/go"
packages = [
"compute/metadata",
"datastore",
"internal",
"internal/atomiccache",
"internal/fields",
"internal/trace",
"internal/version",
]
pruneopts = "UT"
revision = "64a2037ec6be8a4b0c1d1f706ed35b428b989239"
version = "v0.26.0"
[[projects]]
branch = "master"
digest = "1:4e1f46fdeb614d6a9dec18da8de7afc0fe994312ed6a2473c69cf749886a1795"
name = "github.com/dave/jsgo"
packages = [
"assets/std",
"server/servermsg",
"server/wasm/messages",
]
pruneopts = "UT"
revision = "812d6854f9eadd98c18ae36864adf3446c3f2f0e"
[[projects]]
branch = "master"
digest = "1:ed131ac6b04eae55b7448356fdfd56659fc5aa5a6e7ab29a0ddb966f351adcd5"
name = "github.com/dave/services"
packages = [
".",
"constor/constormsg",
]
pruneopts = "UT"
revision = "3f8b08b528872b96cd80fc7025edf98b1b55f047"
[[projects]]
branch = "master"
digest = "1:a34319a40e75889795a1e1bdfc3a8b96c88fbfc658ad9c6ed648a3205938dd3a"
name = "github.com/dave/wasmgo"
packages = [
".",
"cmd",
"cmd/cmdconfig",
"cmd/deployer",
"config",
]
pruneopts = "UT"
revision = "cd9f1b82897a926ec9df9397711bb4c6732069f7"
[[projects]]
digest = "1:a8fc2604eb5975548b14b5bc9db87f90bf2d2b2e08ecc6be9310ec1a1d640ae0"
name = "github.com/golang/protobuf"
packages = [
"proto",
"protoc-gen-go/descriptor",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/struct",
"ptypes/timestamp",
"ptypes/wrappers",
]
pruneopts = "UT"
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
version = "v1.1.0"
[[projects]]
digest = "1:e145e9710a10bc114a6d3e2738aadf8de146adaa031854ffdf7bbfe15da85e63"
name = "github.com/googleapis/gax-go"
packages = ["."]
pruneopts = "UT"
revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
version = "v2.0.0"
[[projects]]
digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e"
name = "github.com/gorilla/websocket"
packages = ["."]
pruneopts = "UT"
revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
version = "v1.2.0"
[[projects]]
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
name = "github.com/inconshreveable/mousetrap"
packages = ["."]
pruneopts = "UT"
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
version = "v1.0"
[[projects]]
branch = "master"
digest = "1:7b4cd865877639770d2711dc6278b98466be7ebbe5cdf2c933659b73d6e58213"
name = "github.com/pkg/browser"
packages = ["."]
pruneopts = "UT"
revision = "c90ca0c84f15f81c982e32665bffd8d7aac8f097"
[[projects]]
digest = "1:645cabccbb4fa8aab25a956cbcbdf6a6845ca736b2c64e197ca7cbb9d210b939"
name = "github.com/spf13/cobra"
packages = ["."]
pruneopts = "UT"
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
version = "v0.0.3"
[[projects]]
digest = "1:dab83a1bbc7ad3d7a6ba1a1cc1760f25ac38cdf7d96a5cdd55cd915a4f5ceaf9"
name = "github.com/spf13/pflag"
packages = ["."]
pruneopts = "UT"
revision = "9a97c102cda95a86cec2345a6f09f55a939babf5"
version = "v1.0.2"
[[projects]]
digest = "1:42d3653af0643d37fe3d708f51fae9bebe0cf6f6a801597ec07097b1fd4a7b85"
name = "go.opencensus.io"
packages = [
".",
"internal",
"internal/tagencoding",
"plugin/ocgrpc",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/propagation",
]
pruneopts = "UT"
revision = "7b558058b7cc960667590e5413ef55157b06652e"
version = "v0.15.0"
[[projects]]
branch = "master"
digest = "1:3c4175c2711d67096567fc2d84a83464d6ff58119af3efc89983339d64144cb0"
name = "golang.org/x/net"
packages = [
"context",
"context/ctxhttp",
"http/httpguts",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"trace",
]
pruneopts = "UT"
revision = "c39426892332e1bb5ec0a434a079bf82f5d30c54"
[[projects]]
branch = "master"
digest = "1:bea0314c10bd362ab623af4880d853b5bad3b63d0ab9945c47e461b8d04203ed"
name = "golang.org/x/oauth2"
packages = [
".",
"google",
"internal",
"jws",
"jwt",
]
pruneopts = "UT"
revision = "3d292e4d0cdc3a0113e6d207bb137145ef1de42f"
[[projects]]
branch = "master"
digest = "1:0316a8ad208917f1d141b077e278980fd5e4594f3a85f835dacbf24d85798252"
name = "golang.org/x/sys"
packages = ["unix"]
pruneopts = "UT"
revision = "14742f9018cd6651ec7364dc6ee08af0baaa1031"
[[projects]]
digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
name = "golang.org/x/text"
packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable",
]
pruneopts = "UT"
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:207ab3dedc2f8776f4e1eebb817c0993019666bb2b7d9b46de03da85044df8df"
name = "google.golang.org/api"
packages = [
"googleapi",
"googleapi/internal/uritemplates",
"internal",
"iterator",
"option",
"transport/grpc",
]
pruneopts = "UT"
revision = "0e8d13b5c025da6a7cf249bb854e5869921dd459"
[[projects]]
digest = "1:26619fcd2452b4044174d26acd8b09c09dffee9a1c3a22d2383b873aa9a0131f"
name = "google.golang.org/appengine"
packages = [
".",
"internal",
"internal/app_identity",
"internal/base",
"internal/datastore",
"internal/log",
"internal/modules",
"internal/remote_api",
"internal/socket",
"internal/urlfetch",
"socket",
"urlfetch",
]
pruneopts = "UT"
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:a76b2bff3873b80af50ec8d0a47dc503d11f4ca7a82b00a50053a7c82b012bac"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
"googleapis/datastore/v1",
"googleapis/rpc/code",
"googleapis/rpc/status",
"googleapis/type/latlng",
]
pruneopts = "UT"
revision = "383e8b2c3b9e36c4076b235b32537292176bae20"
[[projects]]
digest = "1:f4fdb603068ae856b9eb05772787bf4966b1fabd3b074e3f840aab417bd992c3"
name = "google.golang.org/grpc"
packages = [
".",
"balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"credentials/oauth",
"encoding",
"encoding/proto",
"grpclog",
"internal",
"internal/backoff",
"internal/channelz",
"internal/envconfig",
"internal/grpcrand",
"internal/transport",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
]
pruneopts = "UT"
revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455"
version = "v1.14.0"
[[projects]]
digest = "1:d431beffd9ad96318d3548cf2c2d3b8f3915a3972767254e5bf711d5df7425e3"
name = "gopkg.in/src-d/go-billy.v4"
packages = ["."]
pruneopts = "UT"
revision = "83cf655d40b15b427014d7875d10850f96edba14"
version = "v4.2.0"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
input-imports = ["github.com/dave/wasmgo"]
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -1,7 +0,0 @@
required = [
"github.com/dave/wasmgo",
]
[prune]
go-tests = true
unused-packages = true

Makefile

@@ -28,9 +28,5 @@ fetch:
	cp $$(go env GOROOT)/misc/wasm/wasm_exec.js ./html/wasm_exec.js
	sed -i -e 's;</button>;</button>\n\t<div id=\"target\"></div>;' ./html/index.html
.PHONY: jsgo
jsgo:
	wasmgo -c=go deploy github.com/johanbrandhorst/wasm-experiments/jsgo
serve:
	go run main.go

jsgo/main.go

@@ -1,20 +0,0 @@
// +build js,wasm

package main

import "syscall/js"

var document js.Value

func init() {
	document = js.Global().Get("document")
}

func main() {
	div := document.Get("body")
	node := document.Call("createElement", "div")
	node.Set("innerText", "Hello jsgo.io!")
	div.Call("appendChild", node)
}
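
For reference, a minimal sketch (not part of the removed example) of how the same syscall/js API can register a browser event handler. It assumes a toolchain that provides js.FuncOf (Go 1.12 and later); the button text and messages are illustrative.

// +build js,wasm

package main

import "syscall/js"

func main() {
	document := js.Global().Get("document")
	body := document.Get("body")
	btn := document.Call("createElement", "button")
	btn.Set("innerText", "Say hello")
	// js.FuncOf wraps a Go function so JavaScript can invoke it.
	cb := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
		node := document.Call("createElement", "div")
		node.Set("innerText", "Hello from Go!")
		body.Call("appendChild", node)
		return nil
	})
	btn.Call("addEventListener", "click", cb)
	body.Call("appendChild", btn)
	// Block forever so the registered callback stays reachable.
	select {}
}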

vendor/cloud.google.com/go/AUTHORS generated vendored

@@ -1,15 +0,0 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/CONTRIBUTORS generated vendored

@@ -1,40 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
# Keep the list alphabetically sorted.
Alexis Hunt <lexer@google.com>
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
James Hall <james.hall@shopify.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Mario Castro <mariocaster@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Thanatat Tamtan <acoshift@gmail.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/LICENSE generated vendored

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored

@@ -1,503 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"runtime"
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
// metadataIP is the documented metadata server IP address.
metadataIP = "169.254.169.254"
// metadataHostEnv is the environment variable specifying the
// GCE metadata hostname. If empty, the default value of
// metadataIP ("169.254.169.254") is used instead.
// This variable name is not defined by any spec, as far as
// I know; it was made up for the Go package.
metadataHostEnv = "GCE_METADATA_HOST"
userAgent = "gcloud-golang/0.1"
)
type cachedValue struct {
k string
trim bool
mu sync.Mutex
v string
}
var (
projID = &cachedValue{k: "project/project-id", trim: true}
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
instID = &cachedValue{k: "instance/id", trim: true}
)
var (
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}}
)
// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string
func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = cl.getTrimmed(c.k)
} else {
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
}
return
}
var (
onGCEOnce sync.Once
onGCE bool
)
// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
onGCEOnce.Do(initOnGCE)
return onGCE
}
func initOnGCE() {
onGCE = testOnGCE()
}
func testOnGCE() bool {
// The user explicitly said they're on GCE, so trust them.
if os.Getenv(metadataHostEnv) != "" {
return true
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, defaultClient.hc, req)
if err != nil {
resc <- false
return
}
defer res.Body.Close()
resc <- res.Header.Get("Metadata-Flavor") == "Google"
}()
go func() {
addrs, err := net.LookupHost("metadata.google.internal")
if err != nil || len(addrs) == 0 {
resc <- false
return
}
resc <- strsContains(addrs, metadataIP)
}()
tryHarder := systemInfoSuggestsGCE()
if tryHarder {
res := <-resc
if res {
// The first strategy succeeded, so let's use it.
return true
}
// Wait for either the DNS or metadata server probe to
// contradict the other one and say we are running on
// GCE. Give it a lot of time to do so, since the system
// info already suggests we're running on a GCE BIOS.
timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
case res = <-resc:
return res
case <-timer.C:
// Too slow. Who knows what this system is.
return false
}
}
// There's no hint from the system info that we're running on
// GCE, so use the first probe's result as truth, whether it's
// true or false. The goal here is to optimize for speed for
// users who are NOT running on GCE. We can't assume that
// either a DNS lookup or an HTTP request to a blackholed IP
// address is fast. Worst case this should return when the
// metaClient's Transport.ResponseHeaderTimeout or
// Transport.Dial.Timeout fires (in two seconds).
return <-resc
}
// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
if runtime.GOOS != "linux" {
// We don't have any non-Linux clues available, at least yet.
return false
}
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
name := strings.TrimSpace(string(slurp))
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
if err := fn(val, true); err != nil {
return err
}
ok := true
if strings.ContainsRune(suffix, '?') {
suffix += "&wait_for_change=true&last_etag="
} else {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
continue // Retry on other errors.
}
ok = false
}
lastETag = etag
if err := fn(val, ok); err != nil || !ok {
return err
}
}
}
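
For orientation, a hedged usage sketch of the package-level helpers defined above (OnGCE, ProjectID, InstanceAttributeValue); the attribute name is hypothetical.

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server and its DNS name, as in testOnGCE above.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE; metadata service unreachable")
	}
	projID, err := metadata.ProjectID()
	if err != nil {
		log.Fatalf("reading project ID: %v", err)
	}
	// "my-attribute" is an illustrative name; an unset attribute returns a
	// NotDefinedError, as documented above.
	val, err := metadata.InstanceAttributeValue("my-attribute")
	if err != nil {
		log.Printf("attribute lookup: %v", err)
	}
	fmt.Println(projID, val)
}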

vendor/cloud.google.com/go/datastore/client.go generated vendored

@@ -1,118 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
gax "github.com/googleapis/gax-go"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
// Embed so we still implement the DatastoreClient interface,
// if the interface adds more methods.
pb.DatastoreClient
c pb.DatastoreClient
md metadata.MD
}
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
return &datastoreClient{
c: pb.NewDatastoreClient(conn),
md: metadata.Pairs(
resourcePrefixHeader, "projects/"+projectID,
"x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)),
}
}
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (res *pb.LookupResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Lookup(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (res *pb.RunQueryResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.RunQuery(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (res *pb.BeginTransactionResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.BeginTransaction(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (res *pb.CommitResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Commit(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (res *pb.RollbackResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.Rollback(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (res *pb.AllocateIdsResponse, err error) {
err = dc.invoke(ctx, func(ctx context.Context) error {
res, err = dc.c.AllocateIds(ctx, in, opts...)
return err
})
return res, err
}
func (dc *datastoreClient) invoke(ctx context.Context, f func(ctx context.Context) error) error {
ctx = metadata.NewOutgoingContext(ctx, dc.md)
return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
err = f(ctx)
return !shouldRetry(err), err
})
}
func shouldRetry(err error) bool {
if err == nil {
return false
}
s, ok := status.FromError(err)
if !ok {
return false
}
// See https://cloud.google.com/datastore/docs/concepts/errors.
return s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded
}

vendor/cloud.google.com/go/datastore/datastore.go generated vendored

@@ -1,627 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"fmt"
"log"
"os"
"reflect"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/api/option"
gtransport "google.golang.org/api/transport/grpc"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
)
const (
prodAddr = "datastore.googleapis.com:443"
userAgent = "gcloud-golang-datastore/20160401"
)
// ScopeDatastore grants permissions to view and/or manage datastore entities
const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
// Client is a client for reading and writing data in a datastore dataset.
type Client struct {
conn *grpc.ClientConn
client pb.DatastoreClient
endpoint string
dataset string // Called dataset by the datastore API, synonym for project ID.
}
// NewClient creates a new Client for a given dataset.
// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable.
// If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value
// to connect to a locally-running datastore emulator.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
var o []option.ClientOption
// Environment variables for gcd emulator:
// https://cloud.google.com/datastore/docs/tools/datastore-emulator
// If the emulator is available, dial it without passing any credentials.
if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" {
o = []option.ClientOption{
option.WithEndpoint(addr),
option.WithoutAuthentication(),
option.WithGRPCDialOption(grpc.WithInsecure()),
}
} else {
o = []option.ClientOption{
option.WithEndpoint(prodAddr),
option.WithScopes(ScopeDatastore),
option.WithUserAgent(userAgent),
}
}
// Warn if we see the legacy emulator environment variables.
if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" {
log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.")
}
if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" {
log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.")
}
if projectID == "" {
projectID = os.Getenv("DATASTORE_PROJECT_ID")
}
if projectID == "" {
return nil, errors.New("datastore: missing project/dataset id")
}
o = append(o, opts...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
return &Client{
conn: conn,
client: newDatastoreClient(conn, projectID),
dataset: projectID,
}, nil
}
var (
// ErrInvalidEntityType is returned when functions like Get or Next are
// passed a dst or src argument of invalid type.
ErrInvalidEntityType = errors.New("datastore: invalid entity type")
// ErrInvalidKey is returned when an invalid key is presented.
ErrInvalidKey = errors.New("datastore: invalid key")
// ErrNoSuchEntity is returned when no entity was found for a given key.
ErrNoSuchEntity = errors.New("datastore: no such entity")
)
type multiArgType int
const (
multiArgTypeInvalid multiArgType = iota
multiArgTypePropertyLoadSaver
multiArgTypeStruct
multiArgTypeStructPtr
multiArgTypeInterface
)
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument
// passed to Get or to Iterator.Next.
type ErrFieldMismatch struct {
StructType reflect.Type
FieldName string
Reason string
}
func (e *ErrFieldMismatch) Error() string {
return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
e.FieldName, e.StructType, e.Reason)
}
// GeoPoint represents a location as latitude/longitude in degrees.
type GeoPoint struct {
Lat, Lng float64
}
// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
func (g GeoPoint) Valid() bool {
return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
}
func keyToProto(k *Key) *pb.Key {
if k == nil {
return nil
}
var path []*pb.Key_PathElement
for {
el := &pb.Key_PathElement{Kind: k.Kind}
if k.ID != 0 {
el.IdType = &pb.Key_PathElement_Id{Id: k.ID}
} else if k.Name != "" {
el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
}
path = append(path, el)
if k.Parent == nil {
break
}
k = k.Parent
}
// The path should be in order [grandparent, parent, child]
// We did it backward above, so reverse back.
for i := 0; i < len(path)/2; i++ {
path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
}
key := &pb.Key{Path: path}
if k.Namespace != "" {
key.PartitionId = &pb.PartitionId{
NamespaceId: k.Namespace,
}
}
return key
}
// protoToKey decodes a protocol buffer representation of a key into an
// equivalent *Key object. If the key is invalid, protoToKey will return the
// invalid key along with ErrInvalidKey.
func protoToKey(p *pb.Key) (*Key, error) {
var key *Key
var namespace string
if partition := p.PartitionId; partition != nil {
namespace = partition.NamespaceId
}
for _, el := range p.Path {
key = &Key{
Namespace: namespace,
Kind: el.Kind,
ID: el.GetId(),
Name: el.GetName(),
Parent: key,
}
}
if !key.valid() { // Also detects key == nil.
return key, ErrInvalidKey
}
return key, nil
}
// multiKeyToProto is a batch version of keyToProto.
func multiKeyToProto(keys []*Key) []*pb.Key {
ret := make([]*pb.Key, len(keys))
for i, k := range keys {
ret[i] = keyToProto(k)
}
return ret
}
// multiProtoToKey is a batch version of protoToKey.
func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
hasErr := false
ret := make([]*Key, len(keys))
err := make(MultiError, len(keys))
for i, k := range keys {
ret[i], err[i] = protoToKey(k)
if err[i] != nil {
hasErr = true
}
}
if hasErr {
return nil, err
}
return ret, nil
}
// multiValid is a batch version of Key.valid. It returns an error, not a
// []bool.
func multiValid(key []*Key) error {
invalid := false
for _, k := range key {
if !k.valid() {
invalid = true
break
}
}
if !invalid {
return nil
}
err := make(MultiError, len(key))
for i, k := range key {
if !k.valid() {
err[i] = ErrInvalidKey
}
}
return err
}
// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
//
// TODO(djd): multiArg is very confusing. Fold this logic into the
// relevant Put/Get methods to make the logic less opaque.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
if v.Kind() != reflect.Slice {
return multiArgTypeInvalid, nil
}
if v.Type() == typeOfPropertyList {
return multiArgTypeInvalid, nil
}
elemType = v.Type().Elem()
if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
return multiArgTypePropertyLoadSaver, elemType
}
switch elemType.Kind() {
case reflect.Struct:
return multiArgTypeStruct, elemType
case reflect.Interface:
return multiArgTypeInterface, elemType
case reflect.Ptr:
elemType = elemType.Elem()
if elemType.Kind() == reflect.Struct {
return multiArgTypeStructPtr, elemType
}
}
return multiArgTypeInvalid, nil
}
// Close closes the Client.
func (c *Client) Close() error {
return c.conn.Close()
}
// Get loads the entity stored for key into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Get")
defer func() { trace.EndSpan(ctx, err) }()
if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
return ErrInvalidEntityType
}
err = c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.GetMulti")
defer func() { trace.EndSpan(ctx, err) }()
return c.get(ctx, keys, dst, nil)
}
func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
v := reflect.ValueOf(dst)
multiArgType, _ := checkMultiArg(v)
// Sanity checks
if multiArgType == multiArgTypeInvalid {
return errors.New("datastore: dst has invalid type")
}
if len(keys) != v.Len() {
return errors.New("datastore: keys and dst slices have different length")
}
if len(keys) == 0 {
return nil
}
// Go through keys, validate them, serialize them, and create a dict mapping them to their indices.
// Equal keys are deduped.
multiErr, any := make(MultiError, len(keys)), false
keyMap := make(map[string][]int, len(keys))
pbKeys := make([]*pb.Key, 0, len(keys))
for i, k := range keys {
if !k.valid() {
multiErr[i] = ErrInvalidKey
any = true
} else {
ks := k.String()
if _, ok := keyMap[ks]; !ok {
pbKeys = append(pbKeys, keyToProto(k))
}
keyMap[ks] = append(keyMap[ks], i)
}
}
if any {
return multiErr
}
req := &pb.LookupRequest{
ProjectId: c.dataset,
Keys: pbKeys,
ReadOptions: opts,
}
resp, err := c.client.Lookup(ctx, req)
if err != nil {
return err
}
found := resp.Found
missing := resp.Missing
// Upper bound 100 iterations to prevent infinite loop.
// We choose 100 iterations somewhat logically:
// Max number of Entities you can request from Datastore is 1,000.
// Max size for a Datastore Entity is 1 MiB.
// Max request size is 10 MiB, so we assume max response size is also 10 MiB.
// 1,000 / 10 = 100.
// Note that if ctx has a deadline, the deadline will probably
// be hit before we reach 100 iterations.
for i := 0; len(resp.Deferred) > 0 && i < 100; i++ {
req.Keys = resp.Deferred
resp, err = c.client.Lookup(ctx, req)
if err != nil {
return err
}
found = append(found, resp.Found...)
missing = append(missing, resp.Missing...)
}
filled := 0
for _, e := range found {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
elem := v.Index(index)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
multiErr[index] = err
any = true
}
}
}
for _, e := range missing {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
multiErr[index] = ErrNoSuchEntity
}
any = true
}
if filled != len(keys) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
if any {
return multiErr
}
return nil
}
// Put saves the entity src into the datastore with key k. src must be a struct
// pointer or implement PropertyLoadSaver; if a struct pointer then any
// unexported fields of that struct will be skipped. If k is an incomplete key,
// the returned key will be a unique key generated by the datastore.
func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src})
if err != nil {
if me, ok := err.(MultiError); ok {
return nil, me[0]
}
return nil, err
}
return k[0], nil
}
// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
// TODO(jba): rewrite in terms of Mutate.
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) (ret []*Key, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.PutMulti")
defer func() { trace.EndSpan(ctx, err) }()
mutations, err := putMutations(keys, src)
if err != nil {
return nil, err
}
// Make the request.
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: mutations,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
resp, err := c.client.Commit(ctx, req)
if err != nil {
return nil, err
}
// Copy any newly minted keys into the returned keys.
ret = make([]*Key, len(keys))
for i, key := range keys {
if key.Incomplete() {
// This key is in the mutation results.
ret[i], err = protoToKey(resp.MutationResults[i].Key)
if err != nil {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
} else {
ret[i] = key
}
}
return ret, nil
}
func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) {
v := reflect.ValueOf(src)
multiArgType, _ := checkMultiArg(v)
if multiArgType == multiArgTypeInvalid {
return nil, errors.New("datastore: src has invalid type")
}
if len(keys) != v.Len() {
return nil, errors.New("datastore: key and src slices have different length")
}
if len(keys) == 0 {
return nil, nil
}
if err := multiValid(keys); err != nil {
return nil, err
}
mutations := make([]*pb.Mutation, 0, len(keys))
multiErr := make(MultiError, len(keys))
hasErr := false
for i, k := range keys {
elem := v.Index(i)
// Two cases where we need to take the address:
// 1) multiArgTypePropertyLoadSaver => &elem implements PLS
// 2) multiArgTypeStruct => saveEntity needs *struct
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
p, err := saveEntity(k, elem.Interface())
if err != nil {
multiErr[i] = err
hasErr = true
}
var mut *pb.Mutation
if k.Incomplete() {
mut = &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}}
} else {
mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}}
}
mutations = append(mutations, mut)
}
if hasErr {
return nil, multiErr
}
return mutations, nil
}
// Delete deletes the entity for the given key.
func (c *Client) Delete(ctx context.Context, key *Key) error {
err := c.DeleteMulti(ctx, []*Key{key})
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// DeleteMulti is a batch version of Delete.
// TODO(jba): rewrite in terms of Mutate.
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.DeleteMulti")
defer func() { trace.EndSpan(ctx, err) }()
mutations, err := deleteMutations(keys)
if err != nil {
return err
}
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: mutations,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
_, err = c.client.Commit(ctx, req)
return err
}
func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
mutations := make([]*pb.Mutation, 0, len(keys))
set := make(map[string]bool, len(keys))
for _, k := range keys {
if k.Incomplete() {
return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
}
ks := k.String()
if !set[ks] {
mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
})
}
set[ks] = true
}
return mutations, nil
}
// Mutate applies one or more mutations atomically.
// It returns the keys of the argument Mutations, in the same order.
//
// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
// Mutate returns a MultiError in this case even if there is only one Mutation.
func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) (ret []*Key, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Mutate")
defer func() { trace.EndSpan(ctx, err) }()
pmuts, err := mutationProtos(muts)
if err != nil {
return nil, err
}
req := &pb.CommitRequest{
ProjectId: c.dataset,
Mutations: pmuts,
Mode: pb.CommitRequest_NON_TRANSACTIONAL,
}
resp, err := c.client.Commit(ctx, req)
if err != nil {
return nil, err
}
// Copy any newly minted keys into the returned keys.
ret = make([]*Key, len(muts))
for i, mut := range muts {
if mut.key.Incomplete() {
// This key is in the mutation results.
ret[i], err = protoToKey(resp.MutationResults[i].Key)
if err != nil {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
} else {
ret[i] = mut.key
}
}
return ret, nil
}
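// A minimal usage sketch for Mutate (the Task struct and the key variables below
// are illustrative, not part of this package):
//
//	task := &Task{Done: false}
//	keys, err := client.Mutate(ctx,
//		NewInsert(IncompleteKey("Task", nil), task),
//		NewUpsert(existingKey, &Task{Done: true}),
//		NewDelete(staleKey),
//	)
//	if err != nil {
//		// Handle error; a MultiError is returned if any mutation is invalid.
//	}
//	_ = keys // keys[i] is the (possibly newly allocated) key for muts[i].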

View File

@ -1,488 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package datastore provides a client for Google Cloud Datastore.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
Basic Operations
Entities are the unit of storage and are associated with a key. A key
consists of an optional parent key, a string application ID, a string kind
(also known as an entity type), and either a StringID or an IntID. A
StringID is also known as an entity name or key name.
It is valid to create a key with a zero StringID and a zero IntID; this is
called an incomplete key, and does not refer to any saved entity. Putting an
entity into the datastore under an incomplete key will cause a unique key
to be generated for that entity, with a non-zero IntID.
An entity's contents are a mapping from case-sensitive field names to values.
Valid value types are:
- signed integers (int, int8, int16, int32 and int64),
- bool,
- string,
- float32 and float64,
- []byte (up to 1 megabyte in length),
- any type whose underlying type is one of the above predeclared types,
- *Key,
- GeoPoint,
- time.Time (stored with microsecond precision, retrieved as local time),
- structs whose fields are all valid value types,
- pointers to structs whose fields are all valid value types,
- slices of any of the above,
- pointers to a signed integer, bool, string, float32, or float64.
Slices of structs are valid, as are structs that contain slices.
The Get and Put functions load and save an entity's contents. An entity's
contents are typically represented by a struct pointer.
Example code:
type Entity struct {
Value string
}
func main() {
ctx := context.Background()
// Create a datastore client. In a typical application, you would create
// a single client which is reused for every datastore operation.
dsClient, err := datastore.NewClient(ctx, "my-project")
if err != nil {
// Handle error.
}
k := datastore.NameKey("Entity", "stringID", nil)
e := new(Entity)
if err := dsClient.Get(ctx, k, e); err != nil {
// Handle error.
}
old := e.Value
e.Value = "Hello World!"
if _, err := dsClient.Put(ctx, k, e); err != nil {
// Handle error.
}
fmt.Printf("Updated value from %q to %q\n", old, e.Value)
}
GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
Delete functions. They take a []*Key instead of a *Key, and may return a
datastore.MultiError when encountering partial failure.
Mutate generalizes PutMulti and DeleteMulti to a sequence of any Datastore mutations.
It takes a series of mutations created with NewInsert, NewUpdate, NewUpsert and
NewDelete and applies them atomically.
Properties
An entity's contents can be represented by a variety of types. These are
typically struct pointers, but can also be any type that implements the
PropertyLoadSaver interface. If using a struct pointer, you do not have to
explicitly implement the PropertyLoadSaver interface; the datastore will
automatically convert via reflection. If a struct pointer does implement that
interface then those methods will be used in preference to the default
behavior for struct pointers. Struct pointers are more strongly typed and are
easier to use; PropertyLoadSavers are more flexible.
The actual types passed do not have to match between Get and Put calls or even
across different calls to datastore. It is valid to put a *PropertyList and
get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
Conceptually, any entity is saved as a sequence of properties, and is loaded
into the destination value on a property-by-property basis. When loading into
a struct pointer, an entity that cannot be completely represented (such as a
missing field) will result in an ErrFieldMismatch error but it is up to the
caller whether this error is fatal, recoverable or ignorable.
By default, for struct pointers, all properties are potentially indexed, and
the property name is the same as the field name (and hence must start with an
upper case letter).
Fields may have a `datastore:"name,options"` tag. The tag name is the
property name, which must be one or more valid Go identifiers joined by ".",
but may start with a lower case letter. An empty tag name means to just use the
field name. A "-" tag name means that the datastore will ignore that field.
The only valid options are "omitempty", "noindex" and "flatten".
If the options include "omitempty" and the value of the field is empty, then the
field will be omitted on Save. The empty values are false, 0, any nil pointer or
interface value, and any array, slice, map, or string of length zero. Struct field
values will never be empty, except for nil pointers.
If options include "noindex" then the field will not be indexed. All fields are indexed
by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
fields used to store long strings and byte slices must be tagged with "noindex"
or they will cause Put operations to fail.
For a nested struct field, the options may also include "flatten". This indicates
that the immediate fields and any nested substruct fields of the nested struct should be
flattened. See below for examples.
To use multiple options together, separate them by a comma.
The order does not matter.
If the options string is empty, the comma may be omitted.
Example code:
// A and B are renamed to a and b.
// A, C and J are not indexed.
// D's tag is equivalent to having no tag at all (E).
// I is ignored entirely by the datastore.
// J has tag information for both the datastore and json packages.
type TaggedStruct struct {
A int `datastore:"a,noindex"`
B int `datastore:"b"`
C int `datastore:",noindex"`
D int `datastore:""`
E int
I int `datastore:"-"`
J int `datastore:",noindex" json:"j"`
}
Slice Fields
A field of slice type corresponds to a Datastore array property, except for []byte, which corresponds
to a Datastore blob.
Zero-length slice fields are not saved. Slice fields of length 1 or greater are saved
as Datastore arrays. When a zero-length Datastore array is loaded into a slice field,
the slice field remains unchanged.
If a non-array value is loaded into a slice field, the result will be a slice with
one element, containing the value.
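Example code (a sketch; the struct is illustrative):
	type Post struct {
		Title string
		Tags  []string // saved as a Datastore array property
		Image []byte   // []byte is saved as a single blob, not an array
	}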
Loading Nulls
Loading a Datastore Null into a basic type (int, float, etc.) results in a zero value.
Loading a Null into a slice of basic type results in a slice of size 1 containing the zero value.
Loading a Null into a pointer field results in nil.
Loading a Null into a field of struct type is an error.
Pointer Fields
A struct field can be a pointer to a signed integer, floating-point number, string or
bool. Putting a non-nil pointer will store its dereferenced value. Putting a nil
pointer will store a Datastore Null property, unless the field is marked omitempty,
in which case no property will be stored.
Loading a Null into a pointer field sets the pointer to nil. Loading any other value
allocates new storage with the value, and sets the field to point to it.
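Example code (a sketch; the struct is illustrative):
	type Profile struct {
		Age      *int                               // nil stores a Datastore Null property
		Nickname *string `datastore:",omitempty"`   // nil stores no property at all
	}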
Key Field
If the struct contains a *datastore.Key field tagged with the name "__key__",
its value will be ignored on Put. When reading the Entity back into the Go struct,
the field will be populated with the *datastore.Key value used to query for
the Entity.
Example code:
type MyEntity struct {
A int
K *datastore.Key `datastore:"__key__"`
}
k := datastore.NameKey("Entity", "stringID", nil)
e := MyEntity{A: 12}
k, err = dsClient.Put(ctx, k, e)
if err != nil {
// Handle error.
}
var entities []MyEntity
q := datastore.NewQuery("Entity").Filter("A =", 12).Limit(1)
_, err := dsClient.GetAll(ctx, q, &entities)
if err != nil {
// Handle error
}
log.Println(entities[0])
// Prints {12 /Entity,stringID}
Structured Properties
If the struct pointed to contains other structs, then the nested or embedded
structs are themselves saved as Entity values. For example, given these definitions:
type Inner struct {
W int32
X string
}
type Outer struct {
I Inner
}
then an Outer would have one property, Inner, encoded as an Entity value.
If an outer struct is tagged "noindex" then all of its implicit flattened
fields are effectively "noindex".
If the Inner struct contains a *Key field with the name "__key__", like so:
type Inner struct {
W int32
X string
K *datastore.Key `datastore:"__key__"`
}
type Outer struct {
I Inner
}
then the value of K will be used as the Key for Inner, represented
as an Entity value in datastore.
If any nested struct fields should be flattened, instead of encoded as
Entity values, the nested struct field should be tagged with the "flatten"
option. For example, given the following:
type Inner1 struct {
W int32
X string
}
type Inner2 struct {
Y float64
}
type Inner3 struct {
Z bool
}
type Inner4 struct {
WW int
}
type Inner5 struct {
X Inner4
}
type Outer struct {
A int16
I []Inner1 `datastore:",flatten"`
J Inner2 `datastore:",flatten"`
K Inner5 `datastore:",flatten"`
Inner3 `datastore:",flatten"`
}
an Outer's properties would be equivalent to those of:
type OuterEquivalent struct {
A int16
IDotW []int32 `datastore:"I.W"`
IDotX []string `datastore:"I.X"`
JDotY float64 `datastore:"J.Y"`
KDotXDotWW int `datastore:"K.X.WW"`
Z bool
}
Note that the "flatten" option cannot be used for Entity value fields.
The server will reject any dotted field names for an Entity value.
The PropertyLoadSaver Interface
An entity's contents can also be represented by any type that implements the
PropertyLoadSaver interface. This type may be a struct pointer, but it does
not have to be. The datastore package will call Load when getting the entity's
contents, and Save when putting the entity's contents.
Possible uses include deriving non-stored fields, verifying fields, or indexing
a field only if its value is positive.
Example code:
type CustomPropsExample struct {
I, J int
// Sum is not stored, but should always be equal to I + J.
Sum int `datastore:"-"`
}
func (x *CustomPropsExample) Load(ps []datastore.Property) error {
// Load I and J as usual.
if err := datastore.LoadStruct(x, ps); err != nil {
return err
}
// Derive the Sum field.
x.Sum = x.I + x.J
return nil
}
func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
// Validate the Sum field.
if x.Sum != x.I + x.J {
return nil, errors.New("CustomPropsExample has inconsistent sum")
}
// Save I and J as usual. The code below is equivalent to calling
// "return datastore.SaveStruct(x)", but is done manually for
// demonstration purposes.
return []datastore.Property{
{
Name: "I",
Value: int64(x.I),
},
{
Name: "J",
Value: int64(x.J),
},
}, nil
}
The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
arbitrary entity's contents.
The KeyLoader Interface
If a type implements the PropertyLoadSaver interface, it may
also want to implement the KeyLoader interface.
The KeyLoader interface exists to allow implementations of PropertyLoadSaver
to also load an Entity's Key into the Go type. This type may be a struct
pointer, but it does not have to be. The datastore package will call LoadKey
when getting the entity's contents, after calling Load.
Example code:
type WithKeyExample struct {
I int
Key *datastore.Key
}
func (x *WithKeyExample) LoadKey(k *datastore.Key) error {
x.Key = k
return nil
}
func (x *WithKeyExample) Load(ps []datastore.Property) error {
// Load I as usual.
return datastore.LoadStruct(x, ps)
}
func (x *WithKeyExample) Save() ([]datastore.Property, error) {
// Save I as usual.
return datastore.SaveStruct(x)
}
To load a Key into a struct which does not implement the PropertyLoadSaver
interface, see the "Key Field" section above.
Queries
Queries retrieve entities based on their properties or key's ancestry. Running
a query yields an iterator of results: either keys or (key, entity) pairs.
Queries are re-usable and it is safe to call Query.Run from concurrent
goroutines. Iterators are not safe for concurrent use.
Queries are immutable, and are either created by calling NewQuery, or derived
from an existing query by calling a method like Filter or Order that returns a
new query value. A query is typically constructed by calling NewQuery followed
by a chain of zero or more such methods. These methods are:
- Ancestor and Filter constrain the entities returned by running a query.
- Order affects the order in which they are returned.
- Project constrains the fields returned.
- Distinct de-duplicates projected entities.
- KeysOnly makes the iterator return only keys, not (key, entity) pairs.
- Start, End, Offset and Limit define which sub-sequence of matching entities
to return. Start and End take cursors, Offset and Limit take integers. Start
and Offset affect the first result, End and Limit affect the last result.
If both Start and Offset are set, then the offset is relative to Start.
If both End and Limit are set, then the earliest constraint wins. Limit is
relative to Start+Offset, not relative to End. As a special case, a
negative limit means unlimited.
Example code:
type Widget struct {
Description string
Price int
}
func printWidgets(ctx context.Context, client *datastore.Client) {
q := datastore.NewQuery("Widget").
Filter("Price <", 1000).
Order("-Price")
for t := client.Run(ctx, q); ; {
var x Widget
key, err := t.Next(&x)
if err == iterator.Done {
break
}
if err != nil {
// Handle error.
}
fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x)
}
}
Transactions
Client.RunInTransaction runs a function in a transaction.
Example code:
type Counter struct {
Count int
}
func incCount(ctx context.Context, client *datastore.Client) {
var count int
key := datastore.NameKey("Counter", "singleton", nil)
_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
var x Counter
if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity {
return err
}
x.Count++
if _, err := tx.Put(key, &x); err != nil {
return err
}
count = x.Count
return nil
})
if err != nil {
// Handle error.
}
// The value of count is only valid once the transaction is successful
// (RunInTransaction has returned nil).
fmt.Printf("Count=%d\n", count)
}
Pass the ReadOnly option to RunInTransaction if your transaction is used only for Get,
GetMulti or queries. Read-only transactions are more efficient.
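Example code (a sketch, assuming the ReadOnly TransactionOption and the Counter
type from the example above):
	_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
		var x Counter
		return tx.Get(key, &x)
	}, datastore.ReadOnly)
	if err != nil {
		// Handle error.
	}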
Google Cloud Datastore Emulator
This package supports the Cloud Datastore emulator, which is useful for testing and
development. Environment variables are used to indicate that datastore traffic should be
directed to the emulator instead of the production Datastore service.
To install and set up the emulator and its environment variables, see the documentation
at https://cloud.google.com/datastore/docs/tools/datastore-emulator.
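For example, if the emulator is listening on localhost:8081 (the address here is
illustrative), a client can be pointed at it by setting the
DATASTORE_EMULATOR_HOST variable before creating the client:
	os.Setenv("DATASTORE_EMULATOR_HOST", "localhost:8081")
	dsClient, err := datastore.NewClient(ctx, "my-project")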
*/
package datastore // import "cloud.google.com/go/datastore"

View File

@ -1,47 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file provides error functions for common API failure modes.
package datastore
import (
"fmt"
)
// MultiError is returned by batch operations when there are errors with
// particular elements. Errors will be in a one-to-one correspondence with
// the input elements; successful elements will have a nil entry.
type MultiError []error
func (m MultiError) Error() string {
s, n := "", 0
for _, e := range m {
if e != nil {
if n == 0 {
s = e.Error()
}
n++
}
}
switch n {
case 0:
return "(0 errors)"
case 1:
return s
case 2:
return s + " (and 1 other error)"
}
return fmt.Sprintf("%s (and %d other errors)", s, n-1)
}
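// A sketch of handling a MultiError from a batch call (GetMulti here; the keys
// and dst variables are illustrative):
//
//	err := client.GetMulti(ctx, keys, dst)
//	if merr, ok := err.(MultiError); ok {
//		for i, e := range merr {
//			if e != nil {
//				// The operation for keys[i] failed; successful entries are nil.
//			}
//		}
//	}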

View File

@ -1,280 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"bytes"
"encoding/base64"
"encoding/gob"
"errors"
"strconv"
"strings"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
// Key represents the datastore key for a stored entity.
type Key struct {
// Kind cannot be empty.
Kind string
// Either ID or Name must be zero for the Key to be valid.
// If both are zero, the Key is incomplete.
ID int64
Name string
// Parent must either be a complete Key or nil.
Parent *Key
// Namespace provides the ability to partition your data for multiple
// tenants. In most cases, it is not necessary to specify a namespace.
// See docs on datastore multitenancy for details:
// https://cloud.google.com/datastore/docs/concepts/multitenancy
Namespace string
}
// Incomplete reports whether the key does not refer to a stored entity.
func (k *Key) Incomplete() bool {
return k.Name == "" && k.ID == 0
}
// valid returns whether the key is valid.
func (k *Key) valid() bool {
if k == nil {
return false
}
for ; k != nil; k = k.Parent {
if k.Kind == "" {
return false
}
if k.Name != "" && k.ID != 0 {
return false
}
if k.Parent != nil {
if k.Parent.Incomplete() {
return false
}
if k.Parent.Namespace != k.Namespace {
return false
}
}
}
return true
}
// Equal reports whether two keys are equal. Two keys are equal if they are
// both nil, or if their kinds, IDs, names, namespaces and parents are equal.
func (k *Key) Equal(o *Key) bool {
for {
if k == nil || o == nil {
return k == o // if either is nil, both must be nil
}
if k.Namespace != o.Namespace || k.Name != o.Name || k.ID != o.ID || k.Kind != o.Kind {
return false
}
if k.Parent == nil && o.Parent == nil {
return true
}
k = k.Parent
o = o.Parent
}
}
// marshal marshals the key's string representation to the buffer.
func (k *Key) marshal(b *bytes.Buffer) {
if k.Parent != nil {
k.Parent.marshal(b)
}
b.WriteByte('/')
b.WriteString(k.Kind)
b.WriteByte(',')
if k.Name != "" {
b.WriteString(k.Name)
} else {
b.WriteString(strconv.FormatInt(k.ID, 10))
}
}
// String returns a string representation of the key.
func (k *Key) String() string {
if k == nil {
return ""
}
b := bytes.NewBuffer(make([]byte, 0, 512))
k.marshal(b)
return b.String()
}
// Note: the fields are not renamed compared to the appengine gobKey struct.
// This ensures gobs created by appengine can be read here, and vice versa.
type gobKey struct {
Kind string
StringID string
IntID int64
Parent *gobKey
AppID string
Namespace string
}
func keyToGobKey(k *Key) *gobKey {
if k == nil {
return nil
}
return &gobKey{
Kind: k.Kind,
StringID: k.Name,
IntID: k.ID,
Parent: keyToGobKey(k.Parent),
Namespace: k.Namespace,
}
}
func gobKeyToKey(gk *gobKey) *Key {
if gk == nil {
return nil
}
return &Key{
Kind: gk.Kind,
Name: gk.StringID,
ID: gk.IntID,
Parent: gobKeyToKey(gk.Parent),
Namespace: gk.Namespace,
}
}
// GobEncode marshals the key into a sequence of bytes
// using an encoding/gob.Encoder.
func (k *Key) GobEncode() ([]byte, error) {
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// GobDecode unmarshals a sequence of bytes using an encoding/gob.Decoder.
func (k *Key) GobDecode(buf []byte) error {
gk := new(gobKey)
if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
return err
}
*k = *gobKeyToKey(gk)
return nil
}
// MarshalJSON marshals the key into JSON.
func (k *Key) MarshalJSON() ([]byte, error) {
return []byte(`"` + k.Encode() + `"`), nil
}
// UnmarshalJSON unmarshals a key JSON object into a Key.
func (k *Key) UnmarshalJSON(buf []byte) error {
if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
return errors.New("datastore: bad JSON key")
}
k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
if err != nil {
return err
}
*k = *k2
return nil
}
// Encode returns an opaque representation of the key
// suitable for use in HTML and URLs.
// This is compatible with the Python and Java runtimes.
func (k *Key) Encode() string {
pKey := keyToProto(k)
b, err := proto.Marshal(pKey)
if err != nil {
panic(err)
}
// Trailing padding is stripped.
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
// Re-add padding.
if m := len(encoded) % 4; m != 0 {
encoded += strings.Repeat("=", 4-m)
}
b, err := base64.URLEncoding.DecodeString(encoded)
if err != nil {
return nil, err
}
pKey := new(pb.Key)
if err := proto.Unmarshal(b, pKey); err != nil {
return nil, err
}
return protoToKey(pKey)
}
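// A round-trip sketch (illustrative):
//
//	k := NameKey("Task", "task-1", nil)
//	s := k.Encode()         // opaque, URL-safe representation
//	k2, err := DecodeKey(s) // on success, k2.Equal(k) is true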
// AllocateIDs accepts a slice of incomplete keys and returns a
// slice of complete keys that are guaranteed to be valid in the datastore.
func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) {
if keys == nil {
return nil, nil
}
req := &pb.AllocateIdsRequest{
ProjectId: c.dataset,
Keys: multiKeyToProto(keys),
}
resp, err := c.client.AllocateIds(ctx, req)
if err != nil {
return nil, err
}
return multiProtoToKey(resp.Keys)
}
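// Usage sketch (illustrative): turn incomplete keys into complete ones without
// writing any entities.
//
//	incomplete := []*Key{IncompleteKey("Task", nil), IncompleteKey("Task", nil)}
//	complete, err := client.AllocateIDs(ctx, incomplete)
//	// On success, complete[i].ID is non-zero for each key.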
// IncompleteKey creates a new incomplete key.
// The supplied kind cannot be empty.
// The namespace of the new key is empty.
func IncompleteKey(kind string, parent *Key) *Key {
return &Key{
Kind: kind,
Parent: parent,
}
}
// NameKey creates a new key with a name.
// The supplied kind cannot be empty.
// The supplied parent must either be a complete key or nil.
// The namespace of the new key is empty.
func NameKey(kind, name string, parent *Key) *Key {
return &Key{
Kind: kind,
Name: name,
Parent: parent,
}
}
// IDKey creates a new key with an ID.
// The supplied kind cannot be empty.
// The supplied parent must either be a complete key or nil.
// The namespace of the new key is empty.
func IDKey(kind string, id int64, parent *Key) *Key {
return &Key{
Kind: kind,
ID: id,
Parent: parent,
}
}

View File

@ -1,509 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
"reflect"
"strings"
"time"
"cloud.google.com/go/internal/fields"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
var (
typeOfByteSlice = reflect.TypeOf([]byte(nil))
typeOfTime = reflect.TypeOf(time.Time{})
typeOfGeoPoint = reflect.TypeOf(GeoPoint{})
typeOfKeyPtr = reflect.TypeOf(&Key{})
typeOfEntityPtr = reflect.TypeOf(&Entity{})
)
// typeMismatchReason returns a string explaining why the property p could not
// be stored in an entity field of type v.Type().
func typeMismatchReason(p Property, v reflect.Value) string {
entityType := "empty"
switch p.Value.(type) {
case int64:
entityType = "int"
case bool:
entityType = "bool"
case string:
entityType = "string"
case float64:
entityType = "float"
case *Key:
entityType = "*datastore.Key"
case *Entity:
entityType = "*datastore.Entity"
case GeoPoint:
entityType = "GeoPoint"
case time.Time:
entityType = "time.Time"
case []byte:
entityType = "[]byte"
}
return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
}
func overflowReason(x interface{}, v reflect.Value) string {
return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
}
type propertyLoader struct {
// m holds the number of times a substruct field like "Foo.Bar.Baz" has
// been seen so far. The map is constructed lazily.
m map[string]int
}
func (l *propertyLoader) load(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string {
sl, ok := p.Value.([]interface{})
if !ok {
return l.loadOneElement(codec, structValue, p, prev)
}
for _, val := range sl {
p.Value = val
if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" {
return errStr
}
}
return ""
}
// loadOneElement loads the value of Property p into structValue based on the provided
// codec. codec is used to find the field in structValue into which p should be loaded.
// prev is the set of property names already seen for structValue.
func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string {
var sliceOk bool
var sliceIndex int
var v reflect.Value
name := p.Name
fieldNames := strings.Split(name, ".")
for len(fieldNames) > 0 {
var field *fields.Field
// Start by trying to find a field with name. If none found,
// cut off the last field (delimited by ".") and find its parent
// in the codec.
// e.g. for name "A.B.C.D", split off "A.B.C" and try to
// find a field in the codec with this name.
// Loop again with "A.B", etc.
for i := len(fieldNames); i > 0; i-- {
parent := strings.Join(fieldNames[:i], ".")
field = codec.Match(parent)
if field != nil {
fieldNames = fieldNames[i:]
break
}
}
// If we never found a matching field in the codec, return
// error message.
if field == nil {
return "no such struct field"
}
v = initField(structValue, field.Index)
if !v.IsValid() {
return "no such struct field"
}
if !v.CanSet() {
return "cannot set struct field"
}
// If field implements PLS, we delegate loading to the PLS's Load early,
// and stop iterating through fields.
ok, err := plsFieldLoad(v, p, fieldNames)
if err != nil {
return err.Error()
}
if ok {
return ""
}
if field.Type.Kind() == reflect.Struct {
codec, err = structCache.Fields(field.Type)
if err != nil {
return err.Error()
}
structValue = v
}
// If the element is a slice, we need to accommodate it.
if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
if l.m == nil {
l.m = make(map[string]int)
}
sliceIndex = l.m[p.Name]
l.m[p.Name] = sliceIndex + 1
for v.Len() <= sliceIndex {
v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
}
structValue = v.Index(sliceIndex)
// If structValue implements PLS, we delegate loading to the PLS's
// Load early, and stop iterating through fields.
ok, err := plsFieldLoad(structValue, p, fieldNames)
if err != nil {
return err.Error()
}
if ok {
return ""
}
if structValue.Type().Kind() == reflect.Struct {
codec, err = structCache.Fields(structValue.Type())
if err != nil {
return err.Error()
}
}
sliceOk = true
}
}
var slice reflect.Value
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
slice = v
v = reflect.New(v.Type().Elem()).Elem()
} else if _, ok := prev[p.Name]; ok && !sliceOk {
// Zero out the value that was set previously: the property turned out
// to be multi-valued, but this field is not a slice, so it can't hold it.
v.Set(reflect.Zero(v.Type()))
return "multiple-valued property requires a slice field type"
}
prev[p.Name] = struct{}{}
if errReason := setVal(v, p); errReason != "" {
// Set the slice back to its zero value.
if slice.IsValid() {
slice.Set(reflect.Zero(slice.Type()))
}
return errReason
}
if slice.IsValid() {
slice.Index(sliceIndex).Set(v)
}
return ""
}
// plsFieldLoad first tries to convert v's value to a PLS, then v's addressed
// value to a PLS. If neither succeeds, plsFieldLoad returns false for the first
// return value. Otherwise, the first return value will be true.
// If v is successfully converted to a PLS, plsFieldLoad will then try to Load
// the property p into v (by way of the PLS's Load method).
//
// If the field v has been flattened, the Property's name must be altered
// before calling Load to reflect the field v.
// For example, if our original field name was "A.B.C.D",
// and at this point in iteration we had initialized the field
// corresponding to "A" and have moved into the struct, so that now
// v corresponds to the field named "B", then we want to let the
// PLS handle this field (B)'s subfields ("C", "D"),
// so we send the property to the PLS's Load, renamed to "C.D".
//
// If subfields are present, the field v has been flattened.
func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err error) {
vpls, err := plsForLoad(v)
if err != nil {
return false, err
}
if vpls == nil {
return false, nil
}
// If Entity, load properties as well as key.
if e, ok := p.Value.(*Entity); ok {
err = loadEntity(vpls, e)
return true, err
}
// If flattened, we must alter the property's name to reflect
// the field v.
if len(subfields) > 0 {
p.Name = strings.Join(subfields, ".")
}
return true, vpls.Load([]Property{p})
}
// setVal sets 'v' to the value of the Property 'p'.
func setVal(v reflect.Value, p Property) (s string) {
pValue := p.Value
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x, ok := pValue.(int64)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.OverflowInt(x) {
return overflowReason(x, v)
}
v.SetInt(x)
case reflect.Bool:
x, ok := pValue.(bool)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.SetBool(x)
case reflect.String:
x, ok := pValue.(string)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.SetString(x)
case reflect.Float32, reflect.Float64:
x, ok := pValue.(float64)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.OverflowFloat(x) {
return overflowReason(x, v)
}
v.SetFloat(x)
case reflect.Ptr:
// v must be a pointer to either a Key, an Entity, or one of the supported basic types.
if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct && !isValidPointerType(v.Type().Elem()) {
return typeMismatchReason(p, v)
}
if pValue == nil {
// If v is populated already, set it to nil.
if !v.IsNil() {
v.Set(reflect.New(v.Type()).Elem())
}
return ""
}
if x, ok := p.Value.(*Key); ok {
if _, ok := v.Interface().(*Key); !ok {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
return ""
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
switch x := pValue.(type) {
case *Entity:
err := loadEntity(v.Interface(), x)
if err != nil {
return err.Error()
}
case int64:
if v.Elem().OverflowInt(x) {
return overflowReason(x, v.Elem())
}
v.Elem().SetInt(x)
case float64:
if v.Elem().OverflowFloat(x) {
return overflowReason(x, v.Elem())
}
v.Elem().SetFloat(x)
case bool:
v.Elem().SetBool(x)
case string:
v.Elem().SetString(x)
case GeoPoint, time.Time:
v.Elem().Set(reflect.ValueOf(x))
default:
return typeMismatchReason(p, v)
}
case reflect.Struct:
switch v.Type() {
case typeOfTime:
x, ok := pValue.(time.Time)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
case typeOfGeoPoint:
x, ok := pValue.(GeoPoint)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
v.Set(reflect.ValueOf(x))
default:
ent, ok := pValue.(*Entity)
if !ok {
return typeMismatchReason(p, v)
}
err := loadEntity(v.Addr().Interface(), ent)
if err != nil {
return err.Error()
}
}
case reflect.Slice:
x, ok := pValue.([]byte)
if !ok && pValue != nil {
return typeMismatchReason(p, v)
}
if v.Type().Elem().Kind() != reflect.Uint8 {
return typeMismatchReason(p, v)
}
v.SetBytes(x)
default:
return typeMismatchReason(p, v)
}
return ""
}
// initField is similar to reflect's Value.FieldByIndex, in that it
// returns the nested struct field corresponding to index, but it
// initialises any nil pointers encountered when traversing the structure.
func initField(val reflect.Value, index []int) reflect.Value {
for _, i := range index[:len(index)-1] {
val = val.Field(i)
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
}
return val.Field(index[len(index)-1])
}
// loadEntityProto loads a protobuf Entity into a PropertyLoadSaver or struct pointer.
func loadEntityProto(dst interface{}, src *pb.Entity) error {
ent, err := protoToEntity(src)
if err != nil {
return err
}
return loadEntity(dst, ent)
}
func loadEntity(dst interface{}, ent *Entity) error {
if pls, ok := dst.(PropertyLoadSaver); ok {
err := pls.Load(ent.Properties)
if err != nil {
return err
}
if e, ok := dst.(KeyLoader); ok {
err = e.LoadKey(ent.Key)
}
return err
}
return loadEntityToStruct(dst, ent)
}
func loadEntityToStruct(dst interface{}, ent *Entity) error {
pls, err := newStructPLS(dst)
if err != nil {
return err
}
// Try and load key.
keyField := pls.codec.Match(keyFieldName)
if keyField != nil && ent.Key != nil {
pls.v.FieldByIndex(keyField.Index).Set(reflect.ValueOf(ent.Key))
}
// Load properties.
return pls.Load(ent.Properties)
}
func (s structPLS) Load(props []Property) error {
var fieldName, errReason string
var l propertyLoader
prev := make(map[string]struct{})
for _, p := range props {
if errStr := l.load(s.codec, s.v, p, prev); errStr != "" {
// We don't return early, as we try to load as many properties as possible.
// It is valid to load an entity into a struct that cannot fully represent it.
// That case returns an error, but the caller is free to ignore it.
fieldName, errReason = p.Name, errStr
}
}
if errReason != "" {
return &ErrFieldMismatch{
StructType: s.v.Type(),
FieldName: fieldName,
Reason: errReason,
}
}
return nil
}
func protoToEntity(src *pb.Entity) (*Entity, error) {
props := make([]Property, 0, len(src.Properties))
for name, val := range src.Properties {
v, err := propToValue(val)
if err != nil {
return nil, err
}
props = append(props, Property{
Name: name,
Value: v,
NoIndex: val.ExcludeFromIndexes,
})
}
var key *Key
if src.Key != nil {
// Ignore any error, since nested entity values
// are allowed to have an invalid key.
key, _ = protoToKey(src.Key)
}
return &Entity{key, props}, nil
}
// propToValue returns a Go value that represents the PropertyValue. For
// example, a TimestampValue becomes a time.Time.
func propToValue(v *pb.Value) (interface{}, error) {
switch v := v.ValueType.(type) {
case *pb.Value_NullValue:
return nil, nil
case *pb.Value_BooleanValue:
return v.BooleanValue, nil
case *pb.Value_IntegerValue:
return v.IntegerValue, nil
case *pb.Value_DoubleValue:
return v.DoubleValue, nil
case *pb.Value_TimestampValue:
return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil
case *pb.Value_KeyValue:
return protoToKey(v.KeyValue)
case *pb.Value_StringValue:
return v.StringValue, nil
case *pb.Value_BlobValue:
return []byte(v.BlobValue), nil
case *pb.Value_GeoPointValue:
return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil
case *pb.Value_EntityValue:
return protoToEntity(v.EntityValue)
case *pb.Value_ArrayValue:
arr := make([]interface{}, 0, len(v.ArrayValue.Values))
for _, v := range v.ArrayValue.Values {
vv, err := propToValue(v)
if err != nil {
return nil, err
}
arr = append(arr, vv)
}
return arr, nil
default:
return nil, nil
}
}

View File

@ -1,129 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
// A Mutation represents a change to a Datastore entity.
type Mutation struct {
key *Key // needed for transaction PendingKeys and to dedup deletions
mut *pb.Mutation
err error
}
func (m *Mutation) isDelete() bool {
_, ok := m.mut.Operation.(*pb.Mutation_Delete)
return ok
}
// NewInsert creates a mutation that will save the entity src into the datastore with
// key k, returning an error if k already exists.
// See Client.Put for valid values of src.
func NewInsert(k *Key, src interface{}) *Mutation {
if !k.valid() {
return &Mutation{err: ErrInvalidKey}
}
p, err := saveEntity(k, src)
if err != nil {
return &Mutation{err: err}
}
return &Mutation{
key: k,
mut: &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}},
}
}
// NewUpsert creates a mutation that saves the entity src into the datastore with key
// k, whether or not k exists. See Client.Put for valid values of src.
func NewUpsert(k *Key, src interface{}) *Mutation {
if !k.valid() {
return &Mutation{err: ErrInvalidKey}
}
p, err := saveEntity(k, src)
if err != nil {
return &Mutation{err: err}
}
return &Mutation{
key: k,
mut: &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}},
}
}
// NewUpdate creates a mutation that replaces the entity in the datastore with key k,
// returning an error if k does not exist. See Client.Put for valid values of src.
func NewUpdate(k *Key, src interface{}) *Mutation {
if !k.valid() {
return &Mutation{err: ErrInvalidKey}
}
if k.Incomplete() {
return &Mutation{err: fmt.Errorf("datastore: can't update the incomplete key: %v", k)}
}
p, err := saveEntity(k, src)
if err != nil {
return &Mutation{err: err}
}
return &Mutation{
key: k,
mut: &pb.Mutation{Operation: &pb.Mutation_Update{Update: p}},
}
}
// NewDelete creates a mutation that deletes the entity with key k.
func NewDelete(k *Key) *Mutation {
if !k.valid() {
return &Mutation{err: ErrInvalidKey}
}
if k.Incomplete() {
return &Mutation{err: fmt.Errorf("datastore: can't delete the incomplete key: %v", k)}
}
return &Mutation{
key: k,
mut: &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}},
}
}
func mutationProtos(muts []*Mutation) ([]*pb.Mutation, error) {
// If any of the mutations have errors, collect and return them.
var merr MultiError
for i, m := range muts {
if m.err != nil {
if merr == nil {
merr = make(MultiError, len(muts))
}
merr[i] = m.err
}
}
if merr != nil {
return nil, merr
}
var protos []*pb.Mutation
// Collect protos. Remove duplicate deletions (see deleteMutations).
seen := map[string]bool{}
for _, m := range muts {
if m.isDelete() {
ks := m.key.String()
if seen[ks] {
continue
}
seen[ks] = true
}
protos = append(protos, m.mut)
}
return protos, nil
}

View File

@ -1,342 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
"reflect"
"strings"
"unicode"
"cloud.google.com/go/internal/fields"
)
// Entities with more than this many indexed properties will not be saved.
const maxIndexedProperties = 20000
// []byte fields more than 1 megabyte long will not be loaded or saved.
const maxBlobLen = 1 << 20
// Property is a name/value pair plus some metadata. A datastore entity's
// contents are loaded and saved as a sequence of Properties. Each property
// name must be unique within an entity.
type Property struct {
// Name is the property name.
Name string
// Value is the property value. The valid types are:
// - int64
// - bool
// - string
// - float64
// - *Key
// - time.Time (retrieved as local time)
// - GeoPoint
// - []byte (up to 1 megabyte in length)
// - *Entity (representing a nested struct)
// Value can also be:
// - []interface{} where each element is one of the above types
// This set is smaller than the set of valid struct field types that the
// datastore can load and save. A Value's type must be explicitly on
// the list above; it is not sufficient for the underlying type to be
// on that list. For example, a Value of "type myInt64 int64" is
// invalid. Smaller-width integers and floats are also invalid. Again,
// this is more restrictive than the set of valid struct field types.
//
// A Value will have an opaque type when loading entities from an index,
// such as via a projection query. Load entities into a struct instead
// of a PropertyLoadSaver when using a projection query.
//
// A Value may also be the nil interface value; this is equivalent to
// Python's None but not directly representable by a Go struct. Loading
// a nil-valued property into a struct will set that field to the zero
// value.
Value interface{}
// NoIndex is whether the datastore cannot index this property.
// If NoIndex is set to false, []byte and string values are limited to
// 1500 bytes.
NoIndex bool
}
// An Entity is the value type for a nested struct.
// This type is only used for a Property's Value.
type Entity struct {
Key *Key
Properties []Property
}
// PropertyLoadSaver can be converted from and to a slice of Properties.
type PropertyLoadSaver interface {
Load([]Property) error
Save() ([]Property, error)
}
// KeyLoader can store a Key.
type KeyLoader interface {
// PropertyLoadSaver is embedded because a KeyLoader
// must also always implement PropertyLoadSaver.
PropertyLoadSaver
LoadKey(k *Key) error
}
// PropertyList converts a []Property to implement PropertyLoadSaver.
type PropertyList []Property
var (
typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
)
// Load loads all of the provided properties into l.
// It does not first reset *l to an empty slice.
func (l *PropertyList) Load(p []Property) error {
*l = append(*l, p...)
return nil
}
// Save saves all of l's properties as a slice of Properties.
func (l *PropertyList) Save() ([]Property, error) {
return *l, nil
}
// validPropertyName returns whether name consists of one or more valid Go
// identifiers joined by ".".
func validPropertyName(name string) bool {
if name == "" {
return false
}
for _, s := range strings.Split(name, ".") {
if s == "" {
return false
}
first := true
for _, c := range s {
if first {
first = false
if c != '_' && !unicode.IsLetter(c) {
return false
}
} else {
if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
}
return true
}
// parseTag interprets datastore struct field tags
func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
s := t.Get("datastore")
parts := strings.Split(s, ",")
if parts[0] == "-" && len(parts) == 1 {
return "", false, nil, nil
}
if parts[0] != "" && !validPropertyName(parts[0]) {
err = fmt.Errorf("datastore: struct tag has invalid property name: %q", parts[0])
return "", false, nil, err
}
var opts saveOpts
if len(parts) > 1 {
for _, p := range parts[1:] {
switch p {
case "flatten":
opts.flatten = true
case "omitempty":
opts.omitEmpty = true
case "noindex":
opts.noIndex = true
default:
err = fmt.Errorf("datastore: struct tag has invalid option: %q", p)
return "", false, nil, err
}
}
other = opts
}
return parts[0], true, other, nil
}
func validateType(t reflect.Type) error {
if t.Kind() != reflect.Struct {
return fmt.Errorf("datastore: validate called with non-struct type %s", t)
}
return validateChildType(t, "", false, false, map[reflect.Type]bool{})
}
// validateChildType is a recursion helper func for validateType
func validateChildType(t reflect.Type, fieldName string, flatten, prevSlice bool, prevTypes map[reflect.Type]bool) error {
if prevTypes[t] {
return nil
}
prevTypes[t] = true
switch t.Kind() {
case reflect.Slice:
if flatten && prevSlice {
return fmt.Errorf("datastore: flattening nested structs leads to a slice of slices: field %q", fieldName)
}
return validateChildType(t.Elem(), fieldName, flatten, true, prevTypes)
case reflect.Struct:
if t == typeOfTime || t == typeOfGeoPoint {
return nil
}
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
// If a named field is unexported, ignore it. An anonymous
// unexported field is processed, because it may contain
// exported fields, which are visible.
exported := (f.PkgPath == "")
if !exported && !f.Anonymous {
continue
}
_, keep, other, err := parseTag(f.Tag)
// Handle error from parseTag now instead of later (in cache.Fields call).
if err != nil {
return err
}
if !keep {
continue
}
if other != nil {
opts := other.(saveOpts)
flatten = flatten || opts.flatten
}
if err := validateChildType(f.Type, f.Name, flatten, prevSlice, prevTypes); err != nil {
return err
}
}
case reflect.Ptr:
if t == typeOfKeyPtr {
return nil
}
return validateChildType(t.Elem(), fieldName, flatten, prevSlice, prevTypes)
}
return nil
}
// isLeafType determines whether or not a type is a 'leaf type'
// and should not be recursed into, but considered one field.
func isLeafType(t reflect.Type) bool {
return t == typeOfTime || t == typeOfGeoPoint
}
// structCache collects the structs whose fields have already been calculated.
var structCache = fields.NewCache(parseTag, validateType, isLeafType)
// structPLS adapts a struct to be a PropertyLoadSaver.
type structPLS struct {
v reflect.Value
codec fields.List
}
// newStructPLS returns a structPLS, which implements the
// PropertyLoadSaver interface, for the struct pointer p.
func newStructPLS(p interface{}) (*structPLS, error) {
v := reflect.ValueOf(p)
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
return nil, ErrInvalidEntityType
}
v = v.Elem()
f, err := structCache.Fields(v.Type())
if err != nil {
return nil, err
}
return &structPLS{v, f}, nil
}
// LoadStruct loads the properties from p to dst.
// dst must be a struct pointer.
//
// The values of dst's unmatched struct fields are not modified,
// and matching slice-typed fields are not reset before appending to
// them. In particular, it is recommended to pass a pointer to a zero
// valued struct on each LoadStruct call.
func LoadStruct(dst interface{}, p []Property) error {
x, err := newStructPLS(dst)
if err != nil {
return err
}
return x.Load(p)
}
// SaveStruct returns the properties from src as a slice of Properties.
// src must be a struct pointer.
func SaveStruct(src interface{}) ([]Property, error) {
x, err := newStructPLS(src)
if err != nil {
return nil, err
}
return x.Save()
}
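// A small round-trip sketch (the Task struct is illustrative):
//
//	type Task struct{ Done bool }
//
//	props, err := SaveStruct(&Task{Done: true})
//	if err != nil {
//		// Handle error.
//	}
//	// props contains a Property named "Done" with a bool value.
//	var t Task
//	if err := LoadStruct(&t, props); err != nil {
//		// Handle error.
//	}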
// plsForLoad tries to convert v to a PropertyLoadSaver.
// If successful, plsForLoad returns a settable v as a PropertyLoadSaver.
//
// plsForLoad is intended to be used with nested struct fields which
// may implement PropertyLoadSaver.
//
// v must be settable.
func plsForLoad(v reflect.Value) (PropertyLoadSaver, error) {
var nilPtr bool
if v.Kind() == reflect.Ptr && v.IsNil() {
nilPtr = true
v.Set(reflect.New(v.Type().Elem()))
}
vpls, err := pls(v)
if nilPtr && (vpls == nil || err != nil) {
// unset v
v.Set(reflect.Zero(v.Type()))
}
return vpls, err
}
// plsForSave tries to convert v to a PropertyLoadSaver.
// If successful, plsForSave returns v as a PropertyLoadSaver.
//
// plsForSave is intended to be used with nested struct fields which
// may implement PropertyLoadSaver.
//
// v must be settable.
func plsForSave(v reflect.Value) (PropertyLoadSaver, error) {
switch v.Kind() {
case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface, reflect.Chan, reflect.Func:
// If v is nil, return early. v contains no data to save.
if v.IsNil() {
return nil, nil
}
}
return pls(v)
}
func pls(v reflect.Value) (PropertyLoadSaver, error) {
if v.Kind() != reflect.Ptr {
if _, ok := v.Interface().(PropertyLoadSaver); ok {
return nil, fmt.Errorf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface())
}
v = v.Addr()
}
vpls, _ := v.Interface().(PropertyLoadSaver)
return vpls, nil
}

View File

@ -1,784 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"encoding/base64"
"errors"
"fmt"
"math"
"reflect"
"strconv"
"strings"
"cloud.google.com/go/internal/trace"
wrapperspb "github.com/golang/protobuf/ptypes/wrappers"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
type operator int
const (
lessThan operator = iota + 1
lessEq
equal
greaterEq
greaterThan
keyFieldName = "__key__"
)
var operatorToProto = map[operator]pb.PropertyFilter_Operator{
lessThan: pb.PropertyFilter_LESS_THAN,
lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL,
equal: pb.PropertyFilter_EQUAL,
greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL,
greaterThan: pb.PropertyFilter_GREATER_THAN,
}
// filter is a conditional filter on query results.
type filter struct {
FieldName string
Op operator
Value interface{}
}
type sortDirection bool
const (
ascending sortDirection = false
descending sortDirection = true
)
var sortDirectionToProto = map[sortDirection]pb.PropertyOrder_Direction{
ascending: pb.PropertyOrder_ASCENDING,
descending: pb.PropertyOrder_DESCENDING,
}
// order is a sort order on query results.
type order struct {
FieldName string
Direction sortDirection
}
// NewQuery creates a new Query for a specific entity kind.
//
// An empty kind means to return all entities, including entities created and
// managed by other App Engine features, and is called a kindless query.
// Kindless queries cannot include filters or sort orders on property values.
func NewQuery(kind string) *Query {
return &Query{
kind: kind,
limit: -1,
}
}
// Query represents a datastore query.
type Query struct {
kind string
ancestor *Key
filter []filter
order []order
projection []string
distinct bool
distinctOn []string
keysOnly bool
eventual bool
limit int32
offset int32
start []byte
end []byte
namespace string
trans *Transaction
err error
}
func (q *Query) clone() *Query {
x := *q
// Copy the contents of the slice-typed fields to a new backing store.
if len(q.filter) > 0 {
x.filter = make([]filter, len(q.filter))
copy(x.filter, q.filter)
}
if len(q.order) > 0 {
x.order = make([]order, len(q.order))
copy(x.order, q.order)
}
return &x
}
// Ancestor returns a derivative query with an ancestor filter.
// The ancestor should not be nil.
func (q *Query) Ancestor(ancestor *Key) *Query {
q = q.clone()
if ancestor == nil {
q.err = errors.New("datastore: nil query ancestor")
return q
}
q.ancestor = ancestor
return q
}
// EventualConsistency returns a derivative query that returns eventually
// consistent results.
// It only has an effect on ancestor queries.
func (q *Query) EventualConsistency() *Query {
q = q.clone()
q.eventual = true
return q
}
// Namespace returns a derivative query that is associated with the given
// namespace.
//
// A namespace may be used to partition data for multi-tenant applications.
// For details, see https://cloud.google.com/datastore/docs/concepts/multitenancy.
func (q *Query) Namespace(ns string) *Query {
q = q.clone()
q.namespace = ns
return q
}
// Transaction returns a derivative query that is associated with the given
// transaction.
//
// All reads performed as part of the transaction will come from a single
// consistent snapshot. Furthermore, if the transaction is set to a
// serializable isolation level, another transaction cannot concurrently modify
// the data that is read or modified by this transaction.
func (q *Query) Transaction(t *Transaction) *Query {
q = q.clone()
q.trans = t
return q
}
// Filter returns a derivative query with a field-based filter.
// The filterStr argument must be a field name followed by optional space,
// followed by an operator, one of ">", "<", ">=", "<=", or "=".
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
// Field names which contain spaces, quote marks, or operator characters
// should be passed as quoted Go string literals as returned by strconv.Quote
// or the fmt package's %q verb.
func (q *Query) Filter(filterStr string, value interface{}) *Query {
q = q.clone()
filterStr = strings.TrimSpace(filterStr)
if filterStr == "" {
q.err = fmt.Errorf("datastore: invalid filter %q", filterStr)
return q
}
f := filter{
FieldName: strings.TrimRight(filterStr, " ><=!"),
Value: value,
}
switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
case "<=":
f.Op = lessEq
case ">=":
f.Op = greaterEq
case "<":
f.Op = lessThan
case ">":
f.Op = greaterThan
case "=":
f.Op = equal
default:
q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
return q
}
var err error
f.FieldName, err = unquote(f.FieldName)
if err != nil {
q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName)
return q
}
q.filter = append(q.filter, f)
return q
}
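// Example (illustrative): multiple filters are ANDed together.
//
//	q := NewQuery("Widget").
//		Filter("Price <", 1000).
//		Filter("Color =", "blue")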
// Order returns a derivative query with a field-based sort order. Orders are
// applied in the order they are added. The default order is ascending; to sort
// in descending order prefix the fieldName with a minus sign (-).
// Field names which contain spaces, quote marks, or the minus sign
// should be passed as quoted Go string literals as returned by strconv.Quote
// or the fmt package's %q verb.
func (q *Query) Order(fieldName string) *Query {
q = q.clone()
fieldName, dir := strings.TrimSpace(fieldName), ascending
if strings.HasPrefix(fieldName, "-") {
fieldName, dir = strings.TrimSpace(fieldName[1:]), descending
} else if strings.HasPrefix(fieldName, "+") {
q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
return q
}
fieldName, err := unquote(fieldName)
if err != nil {
q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName)
return q
}
if fieldName == "" {
q.err = errors.New("datastore: empty order")
return q
}
q.order = append(q.order, order{
Direction: dir,
FieldName: fieldName,
})
return q
}
// unquote optionally interprets s as a double-quoted or backquoted Go
// string literal if it begins with the relevant character.
func unquote(s string) (string, error) {
if s == "" || (s[0] != '`' && s[0] != '"') {
return s, nil
}
return strconv.Unquote(s)
}
// Project returns a derivative query that yields only the given fields. It
// cannot be used with KeysOnly.
func (q *Query) Project(fieldNames ...string) *Query {
q = q.clone()
q.projection = append([]string(nil), fieldNames...)
return q
}
// Distinct returns a derivative query that yields de-duplicated entities with
// respect to the set of projected fields. It is only used for projection
// queries. Distinct cannot be used with DistinctOn.
func (q *Query) Distinct() *Query {
q = q.clone()
q.distinct = true
return q
}
// DistinctOn returns a derivative query that yields de-duplicated entities with
// respect to the set of the specified fields. It is only used for projection
// queries. The field list should be a subset of the projected field list.
// DistinctOn cannot be used with Distinct.
func (q *Query) DistinctOn(fieldNames ...string) *Query {
q = q.clone()
q.distinctOn = fieldNames
return q
}
// KeysOnly returns a derivative query that yields only keys, not keys and
// entities. It cannot be used with projection queries.
func (q *Query) KeysOnly() *Query {
q = q.clone()
q.keysOnly = true
return q
}
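// exampleProjectionUsage is an illustrative sketch, not part of the original
// source. It assumes NewQuery and hypothetical field names, and shows a
// projection query de-duplicated with Distinct. Project and KeysOnly are
// mutually exclusive, as toProto enforces below.
func exampleProjectionUsage() *Query {
	return NewQuery("Task").
		Project("Category", "Priority").
		Distinct()
}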
// Limit returns a derivative query that has a limit on the number of results
// returned. A negative value means unlimited.
func (q *Query) Limit(limit int) *Query {
q = q.clone()
if limit < math.MinInt32 || limit > math.MaxInt32 {
q.err = errors.New("datastore: query limit overflow")
return q
}
q.limit = int32(limit)
return q
}
// Offset returns a derivative query that has an offset of how many keys to
// skip over before returning results. A negative value is invalid.
func (q *Query) Offset(offset int) *Query {
q = q.clone()
if offset < 0 {
q.err = errors.New("datastore: negative query offset")
return q
}
if offset > math.MaxInt32 {
q.err = errors.New("datastore: query offset overflow")
return q
}
q.offset = int32(offset)
return q
}
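// examplePagingUsage is an illustrative sketch, not part of the original
// source. It assumes NewQuery and shows Offset and Limit combined to fetch a
// single page of results; both values must fit in an int32, as checked above.
func examplePagingUsage(page, pageSize int) *Query {
	return NewQuery("Task").
		Offset(page * pageSize).
		Limit(pageSize)
}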
// Start returns a derivative query with the given start point.
func (q *Query) Start(c Cursor) *Query {
q = q.clone()
q.start = c.cc
return q
}
// End returns a derivative query with the given end point.
func (q *Query) End(c Cursor) *Query {
q = q.clone()
q.end = c.cc
return q
}
// toProto converts the query to a protocol buffer.
func (q *Query) toProto(req *pb.RunQueryRequest) error {
if len(q.projection) != 0 && q.keysOnly {
return errors.New("datastore: query cannot both project and be keys-only")
}
if len(q.distinctOn) != 0 && q.distinct {
return errors.New("datastore: query cannot be both distinct and distinct-on")
}
dst := &pb.Query{}
if q.kind != "" {
dst.Kind = []*pb.KindExpression{{Name: q.kind}}
}
if q.projection != nil {
for _, propertyName := range q.projection {
dst.Projection = append(dst.Projection, &pb.Projection{Property: &pb.PropertyReference{Name: propertyName}})
}
for _, propertyName := range q.distinctOn {
dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName})
}
if q.distinct {
for _, propertyName := range q.projection {
dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName})
}
}
}
if q.keysOnly {
dst.Projection = []*pb.Projection{{Property: &pb.PropertyReference{Name: keyFieldName}}}
}
var filters []*pb.Filter
for _, qf := range q.filter {
if qf.FieldName == "" {
return errors.New("datastore: empty query filter field name")
}
v, err := interfaceToProto(reflect.ValueOf(qf.Value).Interface(), false)
if err != nil {
return fmt.Errorf("datastore: bad query filter value type: %v", err)
}
op, ok := operatorToProto[qf.Op]
if !ok {
return errors.New("datastore: unknown query filter operator")
}
xf := &pb.PropertyFilter{
Op: op,
Property: &pb.PropertyReference{Name: qf.FieldName},
Value: v,
}
filters = append(filters, &pb.Filter{
FilterType: &pb.Filter_PropertyFilter{PropertyFilter: xf},
})
}
if q.ancestor != nil {
filters = append(filters, &pb.Filter{
FilterType: &pb.Filter_PropertyFilter{PropertyFilter: &pb.PropertyFilter{
Property: &pb.PropertyReference{Name: keyFieldName},
Op: pb.PropertyFilter_HAS_ANCESTOR,
Value: &pb.Value{ValueType: &pb.Value_KeyValue{KeyValue: keyToProto(q.ancestor)}},
}}})
}
if len(filters) == 1 {
dst.Filter = filters[0]
} else if len(filters) > 1 {
dst.Filter = &pb.Filter{FilterType: &pb.Filter_CompositeFilter{CompositeFilter: &pb.CompositeFilter{
Op: pb.CompositeFilter_AND,
Filters: filters,
}}}
}
for _, qo := range q.order {
if qo.FieldName == "" {
return errors.New("datastore: empty query order field name")
}
xo := &pb.PropertyOrder{
Property: &pb.PropertyReference{Name: qo.FieldName},
Direction: sortDirectionToProto[qo.Direction],
}
dst.Order = append(dst.Order, xo)
}
if q.limit >= 0 {
dst.Limit = &wrapperspb.Int32Value{Value: q.limit}
}
dst.Offset = q.offset
dst.StartCursor = q.start
dst.EndCursor = q.end
if t := q.trans; t != nil {
if t.id == nil {
return errExpiredTransaction
}
if q.eventual {
return errors.New("datastore: cannot use EventualConsistency query in a transaction")
}
req.ReadOptions = &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
}
}
if q.eventual {
req.ReadOptions = &pb.ReadOptions{ConsistencyType: &pb.ReadOptions_ReadConsistency_{ReadConsistency: pb.ReadOptions_EVENTUAL}}
}
req.QueryType = &pb.RunQueryRequest_Query{Query: dst}
return nil
}
// Count returns the number of results for the given query.
//
// The running time and number of API calls made by Count scale linearly with
// the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise Count will
// continue until it finishes counting or the provided context expires.
func (c *Client) Count(ctx context.Context, q *Query) (n int, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Count")
defer func() { trace.EndSpan(ctx, err) }()
// Check that the query is well-formed.
if q.err != nil {
return 0, q.err
}
// Create a copy of the query, with keysOnly true (if we're not a projection,
// since the two are incompatible).
newQ := q.clone()
newQ.keysOnly = len(newQ.projection) == 0
// Create an iterator and use it to walk through the batches of results
// directly.
it := c.Run(ctx, newQ)
for {
err := it.nextBatch()
if err == iterator.Done {
return n, nil
}
if err != nil {
return 0, err
}
n += len(it.results)
}
}
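// exampleCountUsage is an illustrative sketch, not part of the original
// source. It assumes a *Client obtained elsewhere and the NewQuery
// constructor, and follows the guidance above by setting an explicit limit so
// the count cannot run unbounded.
func exampleCountUsage(ctx context.Context, client *Client) (int, error) {
	q := NewQuery("Task").Filter("Done =", false).Limit(1000)
	return client.Count(ctx, q)
}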
// GetAll runs the provided query in the given context and returns all keys
// that match that query, as well as appending the values to dst.
//
// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
//
// As a special case, *PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when *[]PropertyList was intended.
//
// The keys returned by GetAll will be in a 1-1 correspondence with the entities
// added to dst.
//
// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
//
// The running time and number of API calls made by GetAll scale linearly with
// the sum of the query's offset and limit. Unless the result count is

// expected to be small, it is best to specify a limit; otherwise GetAll will
// continue until it finishes collecting results or the provided context
// expires.
func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) (keys []*Key, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.GetAll")
defer func() { trace.EndSpan(ctx, err) }()
var (
dv reflect.Value
mat multiArgType
elemType reflect.Type
errFieldMismatch error
)
if !q.keysOnly {
dv = reflect.ValueOf(dst)
if dv.Kind() != reflect.Ptr || dv.IsNil() {
return nil, ErrInvalidEntityType
}
dv = dv.Elem()
mat, elemType = checkMultiArg(dv)
if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
return nil, ErrInvalidEntityType
}
}
for t := c.Run(ctx, q); ; {
k, e, err := t.next()
if err == iterator.Done {
break
}
if err != nil {
return keys, err
}
if !q.keysOnly {
ev := reflect.New(elemType)
if elemType.Kind() == reflect.Map {
// This is a special case. The zero values of a map type are
// not immediately useful; they have to be make'd.
//
// Funcs and channels are similar, in that a zero value is not useful,
// but even a freshly make'd channel isn't useful: there's no fixed
// channel buffer size that is always going to be large enough, and
// there's no goroutine to drain the other end. Theoretically, these
// types could be supported, for example by sniffing for a constructor
// method or requiring prior registration, but for now it's not a
// frequent enough concern to be worth it. Programmers can work around
// it by explicitly using Iterator.Next instead of the Query.GetAll
// convenience method.
x := reflect.MakeMap(elemType)
ev.Elem().Set(x)
}
if err = loadEntityProto(ev.Interface(), e); err != nil {
if _, ok := err.(*ErrFieldMismatch); ok {
// We continue loading entities even in the face of field mismatch errors.
// If we encounter any other error, that other error is returned. Otherwise,
// an ErrFieldMismatch is returned.
errFieldMismatch = err
} else {
return keys, err
}
}
if mat != multiArgTypeStructPtr {
ev = ev.Elem()
}
dv.Set(reflect.Append(dv, ev))
}
keys = append(keys, k)
}
return keys, errFieldMismatch
}
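// exampleGetAllUsage is an illustrative sketch, not part of the original
// source. It assumes a *Client, the NewQuery constructor and a hypothetical
// Task struct, and shows dst as a *[]Task per the rules above; keys[i]
// corresponds to tasks[i].
func exampleGetAllUsage(ctx context.Context, client *Client) ([]*Key, error) {
	type Task struct {
		Description string
		Done        bool
	}
	var tasks []Task
	keys, err := client.GetAll(ctx, NewQuery("Task").Limit(100), &tasks)
	return keys, err
}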
// Run runs the given query in the given context.
func (c *Client) Run(ctx context.Context, q *Query) *Iterator {
if q.err != nil {
return &Iterator{err: q.err}
}
t := &Iterator{
ctx: ctx,
client: c,
limit: q.limit,
offset: q.offset,
keysOnly: q.keysOnly,
pageCursor: q.start,
entityCursor: q.start,
req: &pb.RunQueryRequest{
ProjectId: c.dataset,
},
}
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Query.Run")
defer func() { trace.EndSpan(ctx, t.err) }()
if q.namespace != "" {
t.req.PartitionId = &pb.PartitionId{
NamespaceId: q.namespace,
}
}
if err := q.toProto(t.req); err != nil {
t.err = err
}
return t
}
// Iterator is the result of running a query.
type Iterator struct {
ctx context.Context
client *Client
err error
// results is the list of EntityResults still to be iterated over from the
// most recent API call. It will be nil if no requests have yet been issued.
results []*pb.EntityResult
// req is the request to send. It may be modified and used multiple times.
req *pb.RunQueryRequest
// limit is the limit on the number of results this iterator should return.
// The zero value is used to prevent further fetches from the server.
// A negative value means unlimited.
limit int32
// offset is the number of results that still need to be skipped.
offset int32
// keysOnly records whether the query was keys-only (skip entity loading).
keysOnly bool
// pageCursor is the compiled cursor for the next batch/page of result.
// TODO(djd): Can we delete this in favour of paging with the last
// entityCursor from each batch?
pageCursor []byte
// entityCursor is the compiled cursor of the next result.
entityCursor []byte
}
// Next returns the key of the next result. When there are no more results,
// iterator.Done is returned as the error.
//
// If the query is not keys only and dst is non-nil, it also loads the entity
// stored for that key into the struct pointer or PropertyLoadSaver dst, with
// the same semantics and possible errors as for the Get function.
func (t *Iterator) Next(dst interface{}) (k *Key, err error) {
k, e, err := t.next()
if err != nil {
return nil, err
}
if dst != nil && !t.keysOnly {
err = loadEntityProto(dst, e)
}
return k, err
}
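// exampleIteratorUsage is an illustrative sketch, not part of the original
// source. It assumes a *Client, the NewQuery constructor and a hypothetical
// Task struct, and shows the standard Run/Next loop terminated by
// iterator.Done.
func exampleIteratorUsage(ctx context.Context, client *Client) error {
	type Task struct{ Description string }
	it := client.Run(ctx, NewQuery("Task"))
	for {
		var t Task
		_, err := it.Next(&t)
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		// ... use t ...
	}
}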
func (t *Iterator) next() (*Key, *pb.Entity, error) {
// Fetch additional batches while there are no more results.
for t.err == nil && len(t.results) == 0 {
t.err = t.nextBatch()
}
if t.err != nil {
return nil, nil, t.err
}
// Extract the next result, update cursors, and parse the entity's key.
e := t.results[0]
t.results = t.results[1:]
t.entityCursor = e.Cursor
if len(t.results) == 0 {
t.entityCursor = t.pageCursor // At the end of the batch.
}
if e.Entity.Key == nil {
return nil, nil, errors.New("datastore: internal error: server did not return a key")
}
k, err := protoToKey(e.Entity.Key)
if err != nil || k.Incomplete() {
return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
}
return k, e.Entity, nil
}
// nextBatch makes a single call to the server for a batch of results.
func (t *Iterator) nextBatch() error {
if t.limit == 0 {
return iterator.Done // Short-circuits the zero-item response.
}
// Adjust the query with the latest start cursor, limit and offset.
q := t.req.GetQuery()
q.StartCursor = t.pageCursor
q.Offset = t.offset
if t.limit >= 0 {
q.Limit = &wrapperspb.Int32Value{Value: t.limit}
} else {
q.Limit = nil
}
// Run the query.
resp, err := t.client.client.RunQuery(t.ctx, t.req)
if err != nil {
return err
}
// Adjust any offset from skipped results.
skip := resp.Batch.SkippedResults
if skip < 0 {
return errors.New("datastore: internal error: negative number of skipped_results")
}
t.offset -= skip
if t.offset < 0 {
return errors.New("datastore: internal error: query skipped too many results")
}
if t.offset > 0 && len(resp.Batch.EntityResults) > 0 {
return errors.New("datastore: internal error: query returned results before requested offset")
}
// Adjust the limit.
if t.limit >= 0 {
t.limit -= int32(len(resp.Batch.EntityResults))
if t.limit < 0 {
return errors.New("datastore: internal error: query returned more results than the limit")
}
}
// If there are no more results available, set limit to zero to prevent
// further fetches. Otherwise, check that there is a next page cursor available.
if resp.Batch.MoreResults != pb.QueryResultBatch_NOT_FINISHED {
t.limit = 0
} else if resp.Batch.EndCursor == nil {
return errors.New("datastore: internal error: server did not return a cursor")
}
// Update cursors.
// If any results were skipped, use the SkippedCursor as the next entity cursor.
if skip > 0 {
t.entityCursor = resp.Batch.SkippedCursor
} else {
t.entityCursor = q.StartCursor
}
t.pageCursor = resp.Batch.EndCursor
t.results = resp.Batch.EntityResults
return nil
}
// Cursor returns a cursor for the iterator's current location.
func (t *Iterator) Cursor() (c Cursor, err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Query.Cursor")
defer func() { trace.EndSpan(t.ctx, err) }()
// If there is still an offset, we need to skip those results first.
for t.err == nil && t.offset > 0 {
t.err = t.nextBatch()
}
if t.err != nil && t.err != iterator.Done {
return Cursor{}, t.err
}
return Cursor{t.entityCursor}, nil
}
// Cursor is an iterator's position. It can be converted to and from an opaque
// string. A cursor can be used from different HTTP requests, but only with a
// query with the same kind, ancestor, filter and order constraints.
//
// The zero Cursor can be used to indicate that there is no start and/or end
// constraint for a query.
type Cursor struct {
cc []byte
}
// String returns a base-64 string representation of a cursor.
func (c Cursor) String() string {
if c.cc == nil {
return ""
}
return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=")
}
// DecodeCursor decodes a cursor from its base-64 string representation.
func DecodeCursor(s string) (Cursor, error) {
if s == "" {
return Cursor{}, nil
}
if n := len(s) % 4; n != 0 {
s += strings.Repeat("=", 4-n)
}
b, err := base64.URLEncoding.DecodeString(s)
if err != nil {
return Cursor{}, err
}
return Cursor{b}, nil
}
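// exampleCursorUsage is an illustrative sketch, not part of the original
// source. It assumes a *Client and the NewQuery constructor, and shows the
// round trip described above: decode a previously saved cursor string and
// resume the same query from that point with Query.Start.
func exampleCursorUsage(ctx context.Context, client *Client, saved string) (*Iterator, error) {
	cursor, err := DecodeCursor(saved)
	if err != nil {
		return nil, err
	}
	return client.Run(ctx, NewQuery("Task").Start(cursor)), nil
}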

View File

@ -1,470 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"fmt"
"reflect"
"time"
"unicode/utf8"
timepb "github.com/golang/protobuf/ptypes/timestamp"
pb "google.golang.org/genproto/googleapis/datastore/v1"
llpb "google.golang.org/genproto/googleapis/type/latlng"
)
type saveOpts struct {
noIndex bool
flatten bool
omitEmpty bool
}
// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer.
func saveEntity(key *Key, src interface{}) (*pb.Entity, error) {
var err error
var props []Property
if e, ok := src.(PropertyLoadSaver); ok {
props, err = e.Save()
} else {
props, err = SaveStruct(src)
}
if err != nil {
return nil, err
}
return propertiesToProto(key, props)
}
// TODO(djd): Convert this and below to return ([]Property, error).
func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
p := Property{
Name: name,
NoIndex: opts.noIndex,
}
if opts.omitEmpty && isEmptyValue(v) {
return nil
}
// First check if field type implements PLS. If so, use PLS to
// save.
ok, err := plsFieldSave(props, p, name, opts, v)
if err != nil {
return err
}
if ok {
return nil
}
switch x := v.Interface().(type) {
case *Key, time.Time, GeoPoint:
p.Value = x
default:
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.Value = v.Int()
case reflect.Bool:
p.Value = v.Bool()
case reflect.String:
p.Value = v.String()
case reflect.Float32, reflect.Float64:
p.Value = v.Float()
case reflect.Slice:
if v.Type().Elem().Kind() == reflect.Uint8 {
p.Value = v.Bytes()
} else {
return saveSliceProperty(props, name, opts, v)
}
case reflect.Ptr:
if isValidPointerType(v.Type().Elem()) {
if v.IsNil() {
// Nil pointer becomes a nil property value (unless omitempty, handled above).
p.Value = nil
*props = append(*props, p)
return nil
}
// When we recurse on the dereferenced pointer, omitempty no longer applies:
// we already know the pointer is not empty, it doesn't matter if its referent
// is empty or not.
opts.omitEmpty = false
return saveStructProperty(props, name, opts, v.Elem())
}
if v.Type().Elem().Kind() != reflect.Struct {
return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type())
}
// Pointer to struct is a special case.
if v.IsNil() {
return nil
}
v = v.Elem()
fallthrough
case reflect.Struct:
if !v.CanAddr() {
return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
}
vi := v.Addr().Interface()
sub, err := newStructPLS(vi)
if err != nil {
return fmt.Errorf("datastore: unsupported struct field: %v", err)
}
if opts.flatten {
return sub.save(props, opts, name+".")
}
var subProps []Property
err = sub.save(&subProps, opts, "")
if err != nil {
return err
}
subKey, err := sub.key(v)
if err != nil {
return err
}
p.Value = &Entity{
Key: subKey,
Properties: subProps,
}
}
}
if p.Value == nil {
return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
}
*props = append(*props, p)
return nil
}
// plsFieldSave first tries to convert v's value to a PLS, then v's addressed
// value to a PLS. If neither succeeds, plsFieldSave returns false for its first
// return value.
// If v is successfully converted to a PLS, plsFieldSave will then add the
// Value to property p by way of the PLS's Save method, and append it to props.
//
// If the flatten option is present in opts, name must be prepended to each property's
// name before it is appended to props. E.g. if name were "A" and a subproperty's name
// were "B", the resultant name of the property to be appended to props would be "A.B".
func plsFieldSave(props *[]Property, p Property, name string, opts saveOpts, v reflect.Value) (ok bool, err error) {
vpls, err := plsForSave(v)
if err != nil {
return false, err
}
if vpls == nil {
return false, nil
}
subProps, err := vpls.Save()
if err != nil {
return true, err
}
if opts.flatten {
for _, subp := range subProps {
subp.Name = name + "." + subp.Name
*props = append(*props, subp)
}
return true, nil
}
p.Value = &Entity{Properties: subProps}
*props = append(*props, p)
return true, nil
}
// key extracts the *Key struct field from struct v based on the structCodec of s.
func (s structPLS) key(v reflect.Value) (*Key, error) {
if v.Kind() != reflect.Struct {
return nil, errors.New("datastore: cannot save key of non-struct type")
}
keyField := s.codec.Match(keyFieldName)
if keyField == nil {
return nil, nil
}
f := v.FieldByIndex(keyField.Index)
k, ok := f.Interface().(*Key)
if !ok {
return nil, fmt.Errorf("datastore: %s field on struct %T is not a *datastore.Key", keyFieldName, v.Interface())
}
return k, nil
}
func saveSliceProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
// Easy case: if the slice is empty, we're done.
if v.Len() == 0 {
return nil
}
// Work out the properties generated by the first element in the slice. This will
// usually be a single property, but will be more if this is a slice of structs.
var headProps []Property
if err := saveStructProperty(&headProps, name, opts, v.Index(0)); err != nil {
return err
}
// Convert the first element's properties into slice properties, and
// keep track of the values in a map.
values := make(map[string][]interface{}, len(headProps))
for _, p := range headProps {
values[p.Name] = append(make([]interface{}, 0, v.Len()), p.Value)
}
// Collect the properties generated by the subsequent elements.
for i := 1; i < v.Len(); i++ {
elemProps := make([]Property, 0, len(headProps))
if err := saveStructProperty(&elemProps, name, opts, v.Index(i)); err != nil {
return err
}
for _, p := range elemProps {
v, ok := values[p.Name]
if !ok {
return fmt.Errorf("datastore: unexpected property %q in elem %d of slice", p.Name, i)
}
values[p.Name] = append(v, p.Value)
}
}
// Convert to the final properties.
for _, p := range headProps {
p.Value = values[p.Name]
*props = append(*props, p)
}
return nil
}
func (s structPLS) Save() ([]Property, error) {
var props []Property
if err := s.save(&props, saveOpts{}, ""); err != nil {
return nil, err
}
return props, nil
}
func (s structPLS) save(props *[]Property, opts saveOpts, prefix string) error {
for _, f := range s.codec {
name := prefix + f.Name
v := getField(s.v, f.Index)
if !v.IsValid() || !v.CanSet() {
continue
}
var tagOpts saveOpts
if f.ParsedTag != nil {
tagOpts = f.ParsedTag.(saveOpts)
}
var opts1 saveOpts
opts1.noIndex = opts.noIndex || tagOpts.noIndex
opts1.flatten = opts.flatten || tagOpts.flatten
opts1.omitEmpty = tagOpts.omitEmpty // don't propagate
if err := saveStructProperty(props, name, opts1, v); err != nil {
return err
}
}
return nil
}
// getField returns the field from v at the given index path.
// If it encounters a nil-valued field in the path, getField
// stops and returns a zero-valued reflect.Value, preventing the
// panic that would have been caused by reflect's FieldByIndex.
func getField(v reflect.Value, index []int) reflect.Value {
var zero reflect.Value
if v.Type().Kind() != reflect.Struct {
return zero
}
for _, i := range index {
if v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Struct {
if v.IsNil() {
return zero
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) {
e := &pb.Entity{
Key: keyToProto(key),
Properties: map[string]*pb.Value{},
}
indexedProps := 0
for _, p := range props {
// Do not send a Key value as a field to datastore.
if p.Name == keyFieldName {
continue
}
val, err := interfaceToProto(p.Value, p.NoIndex)
if err != nil {
return nil, fmt.Errorf("datastore: %v for a Property with Name %q", err, p.Name)
}
if !p.NoIndex {
rVal := reflect.ValueOf(p.Value)
if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 {
indexedProps += rVal.Len()
} else {
indexedProps++
}
}
if indexedProps > maxIndexedProperties {
return nil, errors.New("datastore: too many indexed properties")
}
if _, ok := e.Properties[p.Name]; ok {
return nil, fmt.Errorf("datastore: duplicate Property with Name %q", p.Name)
}
e.Properties[p.Name] = val
}
return e, nil
}
func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
val := &pb.Value{ExcludeFromIndexes: noIndex}
switch v := iv.(type) {
case int:
val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)}
case int32:
val.ValueType = &pb.Value_IntegerValue{IntegerValue: int64(v)}
case int64:
val.ValueType = &pb.Value_IntegerValue{IntegerValue: v}
case bool:
val.ValueType = &pb.Value_BooleanValue{BooleanValue: v}
case string:
if len(v) > 1500 && !noIndex {
return nil, errors.New("string property too long to index")
}
if !utf8.ValidString(v) {
return nil, fmt.Errorf("string is not valid utf8: %q", v)
}
val.ValueType = &pb.Value_StringValue{StringValue: v}
case float32:
val.ValueType = &pb.Value_DoubleValue{DoubleValue: float64(v)}
case float64:
val.ValueType = &pb.Value_DoubleValue{DoubleValue: v}
case *Key:
if v == nil {
val.ValueType = &pb.Value_NullValue{}
} else {
val.ValueType = &pb.Value_KeyValue{KeyValue: keyToProto(v)}
}
case GeoPoint:
if !v.Valid() {
return nil, errors.New("invalid GeoPoint value")
}
val.ValueType = &pb.Value_GeoPointValue{GeoPointValue: &llpb.LatLng{
Latitude: v.Lat,
Longitude: v.Lng,
}}
case time.Time:
if v.Before(minTime) || v.After(maxTime) {
return nil, errors.New("time value out of range")
}
val.ValueType = &pb.Value_TimestampValue{TimestampValue: &timepb.Timestamp{
Seconds: v.Unix(),
Nanos: int32(v.Nanosecond()),
}}
case []byte:
if len(v) > 1500 && !noIndex {
return nil, errors.New("[]byte property too long to index")
}
val.ValueType = &pb.Value_BlobValue{BlobValue: v}
case *Entity:
e, err := propertiesToProto(v.Key, v.Properties)
if err != nil {
return nil, err
}
val.ValueType = &pb.Value_EntityValue{EntityValue: e}
case []interface{}:
arr := make([]*pb.Value, 0, len(v))
for i, v := range v {
elem, err := interfaceToProto(v, noIndex)
if err != nil {
return nil, fmt.Errorf("%v at index %d", err, i)
}
arr = append(arr, elem)
}
val.ValueType = &pb.Value_ArrayValue{ArrayValue: &pb.ArrayValue{Values: arr}}
// ArrayValues have ExcludeFromIndexes set on the individual items, rather
// than the top-level value.
val.ExcludeFromIndexes = false
default:
rv := reflect.ValueOf(iv)
if !rv.IsValid() {
val.ValueType = &pb.Value_NullValue{}
} else if rv.Kind() == reflect.Ptr { // non-nil pointer: dereference
if rv.IsNil() {
val.ValueType = &pb.Value_NullValue{}
return val, nil
}
return interfaceToProto(rv.Elem().Interface(), noIndex)
} else {
return nil, fmt.Errorf("invalid Value type %T", iv)
}
}
// TODO(jbd): Support EntityValue.
return val, nil
}
// isEmptyValue is taken from the encoding/json package in the
// standard library.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Struct:
if t, ok := v.Interface().(time.Time); ok {
return t.IsZero()
}
}
return false
}
// isValidPointerType reports whether a struct field can be a pointer to type t
// for the purposes of saving and loading.
func isValidPointerType(t reflect.Type) bool {
if t == typeOfTime || t == typeOfGeoPoint {
return true
}
switch t.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return true
case reflect.Bool:
return true
case reflect.String:
return true
case reflect.Float32, reflect.Float64:
return true
}
return false
}

View File

@ -1,36 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"math"
"time"
)
var (
minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
)
func toUnixMicro(t time.Time) int64 {
// We cannot use t.UnixNano() / 1e3 because we want to handle times more than
// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
// be represented in the numerator of a single int64 divide.
return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
}
func fromUnixMicro(t int64) time.Time {
return time.Unix(t/1e6, (t%1e6)*1e3)
}
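// exampleUnixMicroRoundTrip is an illustrative sketch, not part of the
// original source. It shows the intended behaviour of the helpers above: an
// instant already truncated to microsecond precision survives the conversion
// round trip unchanged, within the minTime/maxTime range the package supports.
func exampleUnixMicroRoundTrip(t time.Time) bool {
	t = t.Truncate(time.Microsecond) // the helpers discard sub-microsecond detail
	return fromUnixMicro(toUnixMicro(t)).Equal(t)
}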

View File

@ -1,409 +0,0 @@
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"errors"
"cloud.google.com/go/internal/trace"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
pb "google.golang.org/genproto/googleapis/datastore/v1"
)
// ErrConcurrentTransaction is returned when a transaction is rolled back due
// to a conflict with a concurrent transaction.
var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
var errExpiredTransaction = errors.New("datastore: transaction expired")
type transactionSettings struct {
attempts int
readOnly bool
prevID []byte // ID of the transaction to retry
}
// newTransactionSettings creates a transactionSettings with a given TransactionOption slice.
// Unconfigured options will be set to default values.
func newTransactionSettings(opts []TransactionOption) *transactionSettings {
s := &transactionSettings{attempts: 3}
for _, o := range opts {
o.apply(s)
}
return s
}
// TransactionOption configures the way a transaction is executed.
type TransactionOption interface {
apply(*transactionSettings)
}
// MaxAttempts returns a TransactionOption that overrides the default of 3 attempts.
func MaxAttempts(attempts int) TransactionOption {
return maxAttempts(attempts)
}
type maxAttempts int
func (w maxAttempts) apply(s *transactionSettings) {
if w > 0 {
s.attempts = int(w)
}
}
// ReadOnly is a TransactionOption that marks the transaction as read-only.
var ReadOnly TransactionOption
func init() {
ReadOnly = readOnly{}
}
type readOnly struct{}
func (readOnly) apply(s *transactionSettings) {
s.readOnly = true
}
// Transaction represents a set of datastore operations to be committed atomically.
//
// Operations are enqueued by calling the Put and Delete methods on Transaction
// (or their Multi-equivalents). These operations are only committed when the
// Commit method is invoked. To ensure consistency, reads must be performed by
// using Transaction's Get method or by using the Transaction method when
// building a query.
//
// A Transaction must be committed or rolled back exactly once.
type Transaction struct {
id []byte
client *Client
ctx context.Context
mutations []*pb.Mutation // The mutations to apply.
pending map[int]*PendingKey // Map from mutation index to incomplete keys pending transaction completion.
}
// NewTransaction starts a new transaction.
func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (t *Transaction, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.NewTransaction")
defer func() { trace.EndSpan(ctx, err) }()
for _, o := range opts {
if _, ok := o.(maxAttempts); ok {
return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option")
}
}
return c.newTransaction(ctx, newTransactionSettings(opts))
}
func (c *Client) newTransaction(ctx context.Context, s *transactionSettings) (*Transaction, error) {
req := &pb.BeginTransactionRequest{ProjectId: c.dataset}
if s.readOnly {
req.TransactionOptions = &pb.TransactionOptions{
Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}},
}
} else if s.prevID != nil {
req.TransactionOptions = &pb.TransactionOptions{
Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{
PreviousTransaction: s.prevID,
}},
}
}
resp, err := c.client.BeginTransaction(ctx, req)
if err != nil {
return nil, err
}
return &Transaction{
id: resp.Transaction,
ctx: ctx,
client: c,
mutations: nil,
pending: make(map[int]*PendingKey),
}, nil
}
// RunInTransaction runs f in a transaction. f is invoked with a Transaction
// that f should use for all the transaction's datastore operations.
//
// f must not call Commit or Rollback on the provided Transaction.
//
// If f returns nil, RunInTransaction commits the transaction,
// returning the Commit and a nil error if it succeeds. If the commit fails due
// to a conflicting transaction, RunInTransaction retries f with a new
// Transaction. It gives up and returns ErrConcurrentTransaction after three
// failed attempts (or as configured with MaxAttempts).
//
// If f returns non-nil, then the transaction will be rolled back and
// RunInTransaction will return the same error. The function f is not retried.
//
// Note that when f returns, the transaction is not committed. Calling code
// must not assume that any of f's changes have been committed until
// RunInTransaction returns nil.
//
// Since f may be called multiple times, f should usually be idempotent; that
// is, it should have the same result when called multiple times. Note that
// Transaction.Get will append when unmarshalling slice fields, so it is not
// necessarily idempotent.
func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (cmt *Commit, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.RunInTransaction")
defer func() { trace.EndSpan(ctx, err) }()
settings := newTransactionSettings(opts)
for n := 0; n < settings.attempts; n++ {
tx, err := c.newTransaction(ctx, settings)
if err != nil {
return nil, err
}
if err := f(tx); err != nil {
_ = tx.Rollback()
return nil, err
}
if cmt, err := tx.Commit(); err != ErrConcurrentTransaction {
return cmt, err
}
// Pass this transaction's ID to the retry transaction to preserve
// transaction priority.
if !settings.readOnly {
settings.prevID = tx.id
}
}
return nil, ErrConcurrentTransaction
}
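// exampleRunInTransaction is an illustrative sketch, not part of the original
// source. It assumes a *Client and a hypothetical Counter struct, and shows
// the read-modify-write pattern described above; f may be retried, so it only
// touches datastore state through tx.
func exampleRunInTransaction(ctx context.Context, client *Client, key *Key) error {
	type Counter struct{ N int }
	_, err := client.RunInTransaction(ctx, func(tx *Transaction) error {
		var c Counter
		if err := tx.Get(key, &c); err != nil {
			return err
		}
		c.N++
		_, err := tx.Put(key, &c)
		return err
	})
	return err
}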
// Commit applies the enqueued operations atomically.
func (t *Transaction) Commit() (c *Commit, err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Commit")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return nil, errExpiredTransaction
}
req := &pb.CommitRequest{
ProjectId: t.client.dataset,
TransactionSelector: &pb.CommitRequest_Transaction{Transaction: t.id},
Mutations: t.mutations,
Mode: pb.CommitRequest_TRANSACTIONAL,
}
t.id = nil
resp, err := t.client.client.Commit(t.ctx, req)
if err != nil {
if grpc.Code(err) == codes.Aborted {
return nil, ErrConcurrentTransaction
}
return nil, err
}
// Copy any newly minted keys into the returned keys.
for i, p := range t.pending {
if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil {
return nil, errors.New("datastore: internal error: server returned the wrong mutation results")
}
key, err := protoToKey(resp.MutationResults[i].Key)
if err != nil {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
p.key = key
p.commit = c
}
return c, nil
}
// Rollback abandons a pending transaction.
func (t *Transaction) Rollback() (err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Rollback")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return errExpiredTransaction
}
id := t.id
t.id = nil
_, err = t.client.client.Rollback(t.ctx, &pb.RollbackRequest{
ProjectId: t.client.dataset,
Transaction: id,
})
return err
}
// Get is the transaction-specific version of the package function Get.
// All reads performed during the transaction will come from a single consistent
// snapshot. Furthermore, if the transaction is set to a serializable isolation
// level, another transaction cannot concurrently modify the data that is read
// or modified by this transaction.
func (t *Transaction) Get(key *Key, dst interface{}) (err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.Get")
defer func() { trace.EndSpan(t.ctx, err) }()
opts := &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
}
err = t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts)
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// GetMulti is a batch version of Get.
func (t *Transaction) GetMulti(keys []*Key, dst interface{}) (err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.GetMulti")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return errExpiredTransaction
}
opts := &pb.ReadOptions{
ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id},
}
return t.client.get(t.ctx, keys, dst, opts)
}
// Put is the transaction-specific version of the package function Put.
//
// Put returns a PendingKey which can be resolved into a Key using the
// return value from a successful Commit. If key is an incomplete key, the
// returned pending key will resolve to a unique key generated by the
// datastore.
func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {
h, err := t.PutMulti([]*Key{key}, []interface{}{src})
if err != nil {
if me, ok := err.(MultiError); ok {
return nil, me[0]
}
return nil, err
}
return h[0], nil
}
// PutMulti is a batch version of Put. One PendingKey is returned for each
// element of src in the same order.
// TODO(jba): rewrite in terms of Mutate.
func (t *Transaction) PutMulti(keys []*Key, src interface{}) (ret []*PendingKey, err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.PutMulti")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return nil, errExpiredTransaction
}
mutations, err := putMutations(keys, src)
if err != nil {
return nil, err
}
origin := len(t.mutations)
t.mutations = append(t.mutations, mutations...)
// Prepare the returned handles, pre-populating where possible.
ret = make([]*PendingKey, len(keys))
for i, key := range keys {
p := &PendingKey{}
if key.Incomplete() {
// This key will be in the final commit result.
t.pending[origin+i] = p
} else {
p.key = key
}
ret[i] = p
}
return ret, nil
}
// Delete is the transaction-specific version of the package function Delete.
// Delete enqueues the deletion of the entity for the given key, to be
// committed atomically upon calling Commit.
func (t *Transaction) Delete(key *Key) error {
err := t.DeleteMulti([]*Key{key})
if me, ok := err.(MultiError); ok {
return me[0]
}
return err
}
// DeleteMulti is a batch version of Delete.
// TODO(jba): rewrite in terms of Mutate.
func (t *Transaction) DeleteMulti(keys []*Key) (err error) {
t.ctx = trace.StartSpan(t.ctx, "cloud.google.com/go/datastore.Transaction.DeleteMulti")
defer func() { trace.EndSpan(t.ctx, err) }()
if t.id == nil {
return errExpiredTransaction
}
mutations, err := deleteMutations(keys)
if err != nil {
return err
}
t.mutations = append(t.mutations, mutations...)
return nil
}
// Mutate adds the mutations to the transaction. They will all be applied atomically
// upon calling Commit. Mutate returns a PendingKey for each Mutation in the argument
// list, in the same order. PendingKeys for Delete mutations are always nil.
//
// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
// Mutate returns a MultiError in this case even if there is only one Mutation.
//
// For an example, see Client.Mutate.
func (t *Transaction) Mutate(muts ...*Mutation) ([]*PendingKey, error) {
if t.id == nil {
return nil, errExpiredTransaction
}
pmuts, err := mutationProtos(muts)
if err != nil {
return nil, err
}
origin := len(t.mutations)
t.mutations = append(t.mutations, pmuts...)
// Prepare the returned handles, pre-populating where possible.
ret := make([]*PendingKey, len(muts))
for i, mut := range muts {
if mut.isDelete() {
continue
}
p := &PendingKey{}
if mut.key.Incomplete() {
// This key will be in the final commit result.
t.pending[origin+i] = p
} else {
p.key = mut.key
}
ret[i] = p
}
return ret, nil
}
// Commit represents the result of a committed transaction.
type Commit struct{}
// Key resolves a pending key handle into a final key.
func (c *Commit) Key(p *PendingKey) *Key {
if p == nil { // if called on a *PendingKey from a Delete mutation
return nil
}
// If p.commit is nil, the PendingKey did not come from an incomplete key,
// so p.key is valid.
if p.commit != nil && c != p.commit {
panic("PendingKey was not created by corresponding transaction")
}
return p.key
}
// PendingKey represents the key for newly-inserted entity. It can be
// resolved into a Key by calling the Key method of Commit.
type PendingKey struct {
key *Key
commit *Commit
}
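// examplePendingKey is an illustrative sketch, not part of the original
// source. It assumes a *Client, an incomplete key supplied by the caller and
// a hypothetical Task struct, and shows a PendingKey from Transaction.Put
// being resolved against the Commit returned by RunInTransaction.
func examplePendingKey(ctx context.Context, client *Client, incompleteKey *Key) (*Key, error) {
	type Task struct{ Description string }
	var pending *PendingKey
	commit, err := client.RunInTransaction(ctx, func(tx *Transaction) error {
		var err error
		pending, err = tx.Put(incompleteKey, &Task{Description: "write docs"})
		return err
	})
	if err != nil {
		return nil, err
	}
	return commit.Key(pending), nil
}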

View File

@ -1,54 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/status"
)
// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
// - "google.golang.org/grpc/status".Status
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
// fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
if err == nil {
panic("Annotate called with nil")
}
if s, ok := status.FromError(err); ok {
p := s.Proto()
p.Message = msg + ": " + p.Message
return status.ErrorProto(p)
}
if g, ok := err.(*googleapi.Error); ok {
g.Message = msg + ": " + g.Message
return g
}
return fmt.Errorf("%s: %v", msg, err)
}
// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
return Annotate(err, fmt.Sprintf(format, args...))
}
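// exampleAnnotate is an illustrative sketch, not part of the original source.
// It shows Annotatef prepending context to a *googleapi.Error while keeping
// its HTTP code intact, per the contract documented above; the 404 error is
// fabricated for illustration only.
func exampleAnnotate() error {
	err := &googleapi.Error{Code: 404, Message: "no such entity"}
	return Annotatef(err, "reading key %d", 42)
}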

View File

@ -1,58 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package atomiccache provides a map-based cache that supports very fast
// reads.
package atomiccache
import (
"sync"
"sync/atomic"
)
type mapType map[interface{}]interface{}
// Cache is a map-based cache that supports fast reads via use of atomics.
// Writes are slow, requiring a copy of the entire cache.
// The zero Cache is an empty cache, ready for use.
type Cache struct {
val atomic.Value // mapType
mu sync.Mutex // used only by writers
}
// Get returns the value of the cache at key. If there is no value,
// getter is called to provide one, and the cache is updated.
// The getter function may be called concurrently. It should be pure,
// returning the same value for every call.
func (c *Cache) Get(key interface{}, getter func() interface{}) interface{} {
mp, _ := c.val.Load().(mapType)
if v, ok := mp[key]; ok {
return v
}
// Compute value without lock.
// Might duplicate effort but won't hold other computations back.
newV := getter()
c.mu.Lock()
mp, _ = c.val.Load().(mapType)
newM := make(mapType, len(mp)+1)
for k, v := range mp {
newM[k] = v
}
newM[key] = newV
c.val.Store(newM)
c.mu.Unlock()
return newV
}
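// exampleCacheGet is an illustrative sketch, not part of the original source.
// It shows the intended usage pattern: the getter is pure and may run more
// than once under concurrency, but every caller observes one cached value per
// key.
func exampleCacheGet(c *Cache, key string) int {
	v := c.Get(key, func() interface{} {
		return len(key) // some pure, deterministic computation
	})
	return v.(int)
}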

View File

@ -1,468 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fields provides a view of the fields of a struct that follows the Go
// rules, amended to consider tags and case insensitivity.
//
// Usage
//
// First define a function that interprets tags:
//
// func parseTag(st reflect.StructTag) (name string, keep bool, other interface{}, err error) { ... }
//
// The function's return values describe whether to ignore the field
// completely or provide an alternate name, as well as other data from the
// parse that is stored to avoid re-parsing.
//
// Then define a function to validate the type:
//
// func validate(t reflect.Type) error { ... }
//
// Then, if necessary, define a function to specify leaf types - types
// which should be considered one field and not be recursed into:
//
// func isLeafType(t reflect.Type) bool { ... }
//
// eg:
//
// func isLeafType(t reflect.Type) bool {
// return t == reflect.TypeOf(time.Time{})
// }
//
// Next, construct a Cache, passing your functions. As its name suggests, a
// Cache remembers validation and field information for a type, so subsequent
// calls with the same type are very fast.
//
// cache := fields.NewCache(parseTag, validate, isLeafType)
//
// To get the fields of a struct type as determined by the above rules, call
// the Fields method:
//
// fields, err := cache.Fields(reflect.TypeOf(MyStruct{}))
//
// The return value can be treated as a slice of Fields.
//
// Given a string, such as a key or column name obtained during unmarshalling,
// call Match on the list of fields to find a field whose name is the best
// match:
//
// field := fields.Match(name)
//
// Match looks for an exact match first, then falls back to a case-insensitive
// comparison.
package fields
import (
"bytes"
"errors"
"reflect"
"sort"
"strings"
"cloud.google.com/go/internal/atomiccache"
)
// A Field records information about a struct field.
type Field struct {
Name string // effective field name
NameFromTag bool // did Name come from a tag?
Type reflect.Type // field type
Index []int // index sequence, for reflect.Value.FieldByIndex
ParsedTag interface{} // third return value of the parseTag function
nameBytes []byte
equalFold func(s, t []byte) bool
}
type ParseTagFunc func(reflect.StructTag) (name string, keep bool, other interface{}, err error)
type ValidateFunc func(reflect.Type) error
type LeafTypesFunc func(reflect.Type) bool
// A Cache records information about the fields of struct types.
//
// A Cache is safe for use by multiple goroutines.
type Cache struct {
parseTag ParseTagFunc
validate ValidateFunc
leafTypes LeafTypesFunc
cache atomiccache.Cache // from reflect.Type to cacheValue
}
// NewCache constructs a Cache.
//
// Its first argument should be a function that accepts
// a struct tag and returns four values: an alternative name for the field
// extracted from the tag, a boolean saying whether to keep the field or ignore
// it, additional data that is stored with the field information to avoid
// having to parse the tag again, and an error.
//
// Its second argument should be a function that accepts a reflect.Type and
// returns an error if the struct type is invalid in any way. For example, it
// may check that all of the struct field tags are valid, or that all fields
// are of an appropriate type.
func NewCache(parseTag ParseTagFunc, validate ValidateFunc, leafTypes LeafTypesFunc) *Cache {
if parseTag == nil {
parseTag = func(reflect.StructTag) (string, bool, interface{}, error) {
return "", true, nil, nil
}
}
if validate == nil {
validate = func(reflect.Type) error {
return nil
}
}
if leafTypes == nil {
leafTypes = func(reflect.Type) bool {
return false
}
}
return &Cache{
parseTag: parseTag,
validate: validate,
leafTypes: leafTypes,
}
}
// A fieldScan represents an item on the fieldByNameFunc scan work list.
type fieldScan struct {
typ reflect.Type
index []int
}
// Fields returns all the exported fields of t, which must be a struct type. It
// follows the standard Go rules for embedded fields, modified by the presence
// of tags. The result is sorted lexicographically by index.
//
// These rules apply in the absence of tags:
// Anonymous struct fields are treated as if their inner exported fields were
// fields in the outer struct (embedding). The result includes all fields that
// aren't shadowed by fields at a higher level of embedding. If more than one
// field with the same name exists at the same level of embedding, it is
// excluded. An anonymous field that is not of struct type is treated as having
// its type as its name.
//
// Tags modify these rules as follows:
// A field's tag is used as its name.
// An anonymous struct field with a name given in its tag is treated as
// a field having that name, rather than an embedded struct (the struct's
// fields will not be returned).
// If more than one field with the same name exists at the same level of embedding,
// but exactly one of them is tagged, then the tagged field is reported and the others
// are ignored.
func (c *Cache) Fields(t reflect.Type) (List, error) {
if t.Kind() != reflect.Struct {
panic("fields: Fields of non-struct type")
}
return c.cachedTypeFields(t)
}
// A List is a list of Fields.
type List []Field
// Match returns the field in the list whose name best matches the supplied
// name, or nil if no field does. If there is a field with the exact name, it
// is returned. Otherwise the first field (sorted by index) whose name matches
// case-insensitively is returned.
func (l List) Match(name string) *Field {
return l.MatchBytes([]byte(name))
}
// MatchBytes is identical to Match, except that the argument is a byte slice.
func (l List) MatchBytes(name []byte) *Field {
var f *Field
for i := range l {
ff := &l[i]
if bytes.Equal(ff.nameBytes, name) {
return ff
}
if f == nil && ff.equalFold(ff.nameBytes, name) {
f = ff
}
}
return f
}
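// exampleFieldsMatch is an illustrative sketch, not part of the original
// source. It assumes a hypothetical Row struct and shows the flow from the
// package documentation: build a Cache (nil funcs fall back to the defaults
// above), list a struct's fields, then resolve a name case-insensitively.
func exampleFieldsMatch() (*Field, error) {
	type Row struct {
		ID   int64
		Name string
	}
	cache := NewCache(nil, nil, nil)
	list, err := cache.Fields(reflect.TypeOf(Row{}))
	if err != nil {
		return nil, err
	}
	// An exact name match wins; otherwise the first case-insensitive match is used.
	return list.Match("name"), nil
}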
type cacheValue struct {
fields List
err error
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
// This code has been copied and modified from
// https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/encode.go.
func (c *Cache) cachedTypeFields(t reflect.Type) (List, error) {
cv := c.cache.Get(t, func() interface{} {
if err := c.validate(t); err != nil {
return cacheValue{nil, err}
}
f, err := c.typeFields(t)
return cacheValue{List(f), err}
}).(cacheValue)
return cv.fields, cv.err
}
func (c *Cache) typeFields(t reflect.Type) ([]Field, error) {
fields, err := c.listFields(t)
if err != nil {
return nil, err
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields.
// The fields are sorted in primary order of name, secondary order of field
// index length. So the first field with a given name is the dominant one.
var out []Field
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.Name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.Name != name {
break
}
}
// Find the dominant field, if any, out of all fields that have the same name.
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
sort.Sort(byIndex(out))
return out, nil
}
func (c *Cache) listFields(t reflect.Type) ([]Field, error) {
// This uses the same condition that the Go language does: there must be a unique instance
// of the match at a given depth level. If there are multiple instances of a match at the
// same depth, they annihilate each other and inhibit any possible match at a lower level.
// The algorithm is breadth first search, one depth level at a time.
// The current and next slices are work queues:
// current lists the fields to visit on this depth level,
// and next lists the fields on the next lower level.
current := []fieldScan{}
next := []fieldScan{{typ: t}}
// nextCount records the number of times an embedded type has been
// encountered and considered for queueing in the 'next' slice.
// We only queue the first one, but we increment the count on each.
// If a struct type T can be reached more than once at a given depth level,
// then it annihilates itself and need not be considered at all when we
// process that next depth level.
var nextCount map[reflect.Type]int
// visited records the structs that have been considered already.
// Embedded pointer fields can create cycles in the graph of
// reachable embedded types; visited avoids following those cycles.
// It also avoids duplicated effort: if we didn't find the field in an
// embedded type T at level 2, we won't find it in one at level 4 either.
visited := map[reflect.Type]bool{}
var fields []Field // Fields found.
for len(next) > 0 {
current, next = next, current[:0]
count := nextCount
nextCount = nil
// Process all the fields at this depth, now listed in 'current'.
// The loop queues embedded fields found in 'next', for processing during the next
// iteration. The multiplicity of the 'current' field counts is recorded
// in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'.
for _, scan := range current {
t := scan.typ
if visited[t] {
// We've looked through this type before, at a higher level.
// That higher level would shadow the lower level we're now at,
// so this one can't be useful to us. Ignore it.
continue
}
visited[t] = true
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
exported := (f.PkgPath == "")
// If a named field is unexported, ignore it. An anonymous
// unexported field is processed, because it may contain
// exported fields, which are visible.
if !exported && !f.Anonymous {
continue
}
// Examine the tag.
tagName, keep, other, err := c.parseTag(f.Tag)
if err != nil {
return nil, err
}
if !keep {
continue
}
if c.leafTypes(f.Type) {
fields = append(fields, newField(f, tagName, other, scan.index, i))
continue
}
var ntyp reflect.Type
if f.Anonymous {
// Anonymous field of type T or *T.
ntyp = f.Type
if ntyp.Kind() == reflect.Ptr {
ntyp = ntyp.Elem()
}
}
// Record fields with a tag name, non-anonymous fields, or
// anonymous non-struct fields.
if tagName != "" || ntyp == nil || ntyp.Kind() != reflect.Struct {
if !exported {
continue
}
fields = append(fields, newField(f, tagName, other, scan.index, i))
if count[t] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Queue embedded struct fields for processing with next level,
// but only if the embedded types haven't already been queued.
if nextCount[ntyp] > 0 {
nextCount[ntyp] = 2 // exact multiple doesn't matter
continue
}
if nextCount == nil {
nextCount = map[reflect.Type]int{}
}
nextCount[ntyp] = 1
if count[t] > 1 {
nextCount[ntyp] = 2 // exact multiple doesn't matter
}
var index []int
index = append(index, scan.index...)
index = append(index, i)
next = append(next, fieldScan{ntyp, index})
}
}
}
return fields, nil
}
func newField(f reflect.StructField, tagName string, other interface{}, index []int, i int) Field {
name := tagName
if name == "" {
name = f.Name
}
sf := Field{
Name: name,
NameFromTag: tagName != "",
Type: f.Type,
ParsedTag: other,
nameBytes: []byte(name),
}
sf.equalFold = foldFunc(sf.nameBytes)
sf.Index = append(sf.Index, index...)
sf.Index = append(sf.Index, i)
return sf
}
// byName sorts fields using the following criteria, in order:
// 1. name
// 2. embedding depth
// 3. tag presence (preferring a tagged field)
// 4. index sequence.
type byName []Field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].Name != x[j].Name {
return x[i].Name < x[j].Name
}
if len(x[i].Index) != len(x[j].Index) {
return len(x[i].Index) < len(x[j].Index)
}
if x[i].NameFromTag != x[j].NameFromTag {
return x[i].NameFromTag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []Field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
xi := x[i].Index
xj := x[j].Index
ln := len(xi)
if l := len(xj); l < ln {
ln = l
}
for k := 0; k < ln; k++ {
if xi[k] != xj[k] {
return xi[k] < xj[k]
}
}
return len(xi) < len(xj)
}
// dominantField looks through the fields, all of which are known to have the
// same name, to find the single field that dominates the others using Go's
// embedding rules, modified by the presence of tags. If there are multiple
// top-level fields, the boolean will be false: This condition is an error in
// Go and we skip all the fields.
func dominantField(fs []Field) (Field, bool) {
// The fields are sorted in increasing index-length order, then by presence of tag.
// That means that the first field is the dominant one. We need only check
// for error cases: two fields at top level, either both tagged or neither tagged.
if len(fs) > 1 && len(fs[0].Index) == len(fs[1].Index) && fs[0].NameFromTag == fs[1].NameFromTag {
return Field{}, false
}
return fs[0], true
}
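
For intuition, here is a small standalone example (using encoding/json, which follows the same embedding rules this package mirrors; it is an illustration, not part of the removed file): a field at a shallower embedding depth dominates a deeper one with the same name, while two untagged fields at the same depth cancel each other out and are dropped.

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Inner struct {
	Name string
	ID   int
}

type Outer struct {
	Inner        // Inner.Name sits at depth 1
	Name  string // this Name sits at depth 0 and dominates Inner.Name
}

type A struct{ ID int }
type B struct{ ID int }

type Clash struct {
	A // A.ID and B.ID are both at depth 1, neither dominates,
	B // so ID is dropped from the output entirely
}

func main() {
	b, _ := json.Marshal(Outer{Inner: Inner{Name: "inner", ID: 7}, Name: "outer"})
	fmt.Println(string(b)) // {"ID":7,"Name":"outer"}
	b, _ = json.Marshal(Clash{A: A{ID: 1}, B: B{ID: 2}})
	fmt.Println(string(b)) // {}
}
```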
// ParseStandardTag extracts the sub-tag named by key, then parses it using the
// de facto standard format introduced in encoding/json:
// "-" means "ignore this tag". It must occur by itself. (parseStandardTag returns an error
// in this case, whereas encoding/json accepts the "-" even if it is not alone.)
// "<name>" provides an alternative name for the field
// "<name>,opt1,opt2,..." specifies options after the name.
// The options are returned as a []string.
func ParseStandardTag(key string, t reflect.StructTag) (name string, keep bool, options []string, err error) {
s := t.Get(key)
parts := strings.Split(s, ",")
if parts[0] == "-" {
if len(parts) > 1 {
return "", false, nil, errors.New(`"-" field tag with options`)
}
return "", false, nil, nil
}
if len(parts) > 1 {
options = parts[1:]
}
return parts[0], true, options, nil
}
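
As a reference for the tag format documented above, here is a minimal self-contained sketch (the parsing is re-implemented inline because the real package is internal, and it omits the error returned for "-" with options):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// parseStandardTag mirrors the behaviour described above for a single key.
func parseStandardTag(key string, t reflect.StructTag) (name string, keep bool, options []string) {
	parts := strings.Split(t.Get(key), ",")
	if parts[0] == "-" {
		return "", false, nil // "-" means: ignore this field
	}
	return parts[0], true, parts[1:]
}

func main() {
	tag := reflect.StructTag(`datastore:"views,noindex,omitempty"`)
	name, keep, opts := parseStandardTag("datastore", tag)
	fmt.Println(name, keep, opts) // views true [noindex omitempty]
}
```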


@ -1,156 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fields
// This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold.go.
// Only the license and package were changed.
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
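
To see why 'k' and 's' need the special right-hand fold above, here is a short standalone demonstration (illustrative only) of the two non-ASCII runes involved:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// The Kelvin sign (U+212A) folds to ASCII 'k' and the long s (U+017F)
	// folds to ASCII 's', so a pure ASCII comparison would miss these matches.
	fmt.Println(bytes.EqualFold([]byte("k"), []byte("\u212a")))  // true
	fmt.Println(bytes.EqualFold([]byte("s"), []byte("\u017f")))  // true
	fmt.Println(bytes.EqualFold([]byte("name"), []byte("NAME"))) // true (plain ASCII fold)
}
```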


@ -1,55 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"time"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
)
// Retry calls the supplied function f repeatedly according to the provided
// backoff parameters. It returns when one of the following occurs:
// When f's first return value is true, Retry immediately returns with f's second
// return value.
// When the provided context is done, Retry returns with an error that
// includes both ctx.Err() and the last error returned by f.
func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error {
return retry(ctx, bo, f, gax.Sleep)
}
func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
sleep func(context.Context, time.Duration) error) error {
var lastErr error
for {
stop, err := f()
if stop {
return err
}
// Remember the last "real" error from f.
if err != nil && err != context.Canceled && err != context.DeadlineExceeded {
lastErr = err
}
p := bo.Pause()
if cerr := sleep(ctx, p); cerr != nil {
if lastErr != nil {
return Annotatef(lastErr, "retry failed with %v; last error", cerr)
}
return cerr
}
}
}
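
The same retry pattern in a self-contained form; a hand-rolled exponential backoff stands in for gax.Backoff here, so this is an illustrative sketch rather than the removed implementation:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func retry(ctx context.Context, f func() (stop bool, err error)) error {
	pause := 100 * time.Millisecond
	var lastErr error
	for {
		stop, err := f()
		if stop {
			return err
		}
		if err != nil {
			lastErr = err // remember the last "real" error from f
		}
		select {
		case <-ctx.Done():
			if lastErr != nil {
				return fmt.Errorf("retry failed with %v; last error: %v", ctx.Err(), lastErr)
			}
			return ctx.Err()
		case <-time.After(pause):
		}
		if pause *= 2; pause > 5*time.Second {
			pause = 5 * time.Second
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	attempts := 0
	err := retry(ctx, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, errors.New("transient failure")
		}
		return true, nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```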


@ -1,83 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.8
package trace
import (
"go.opencensus.io/trace"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name)
return ctx
}
func EndSpan(ctx context.Context, err error) {
span := trace.FromContext(ctx)
if err != nil {
span.SetStatus(toStatus(err))
}
span.End()
}
// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
if err2, ok := err.(*googleapi.Error); ok {
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
} else if s, ok := status.FromError(err); ok {
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
} else {
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
}
}
// TODO (deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
switch httpStatusCode {
case 200:
return int32(code.Code_OK)
case 499:
return int32(code.Code_CANCELLED)
case 500:
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
case 400:
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
case 504:
return int32(code.Code_DEADLINE_EXCEEDED)
case 404:
return int32(code.Code_NOT_FOUND)
case 409:
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
case 403:
return int32(code.Code_PERMISSION_DENIED)
case 401:
return int32(code.Code_UNAUTHENTICATED)
case 429:
return int32(code.Code_RESOURCE_EXHAUSTED)
case 501:
return int32(code.Code_UNIMPLEMENTED)
case 503:
return int32(code.Code_UNAVAILABLE)
default:
return int32(code.Code_UNKNOWN)
}
}


@ -1,30 +0,0 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.8
package trace
import (
"golang.org/x/net/context"
)
// OpenCensus only supports go 1.8 and higher.
func StartSpan(ctx context.Context, _ string) context.Context {
return ctx
}
func EndSpan(context.Context, error) {
}


@ -1,6 +0,0 @@
#!/bin/bash
today=$(date +%Y%m%d)
sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE


@ -1,71 +0,0 @@
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:generate ./update_version.sh
// Package version contains version information for Google Cloud Client
// Libraries for Go, as reported in request headers.
package version
import (
"runtime"
"strings"
"unicode"
)
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20180226"
// Go returns the Go runtime version. The returned string
// has no whitespace.
func Go() string {
return goVersion
}
var goVersion = goVer(runtime.Version())
const develPrefix = "devel +"
func goVer(s string) string {
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return ""
}
func notSemverRune(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
}
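
Worked examples, traced from goVer above: "go1.10.3" already has two dots and becomes "1.10.3"; "go1.10" is padded to "1.10.0"; "go1.10rc1" is split into "1.10" plus the prerelease "rc1" and becomes "1.10.0-rc1"; and a development build such as "devel +abc123 Mon Jan 1" reduces to just the hash, "abc123".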


@ -1,7 +0,0 @@
Copyright (c) 2018 David Brophy
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

File diff suppressed because it is too large


@ -1,260 +0,0 @@
package std
var Objects = map[string]string{
"archive/tar": "15ab337488de08d4ed43b665c5c0b592971d283b",
"archive/zip": "7e7e81adb9a53e35bc8bf3bf2e02c354d63f54c4",
"bufio": "900a143331595c8de90584cc0ffcb3086472c178",
"bytes": "6b82cb44f6c71e9bb239ca54368fffc5b580cae1",
"cmd/addr2line": "963bb7e97918aea491180b40ea53b18fc97307cc",
"cmd/api": "c63b7beb325974779aa72d17f3fdea3a467ac7ef",
"cmd/asm": "e5b77367ba6da08a4296e8ebfd76d4909983ce67",
"cmd/asm/internal/arch": "0b18eae570fc24e904ca39583c780824c071fd43",
"cmd/asm/internal/asm": "03697eea25b014b8bd6b5c9f0d2c3161c67f572c",
"cmd/asm/internal/flags": "74da3bea7765da3d9addeff2a05e89aeffc21956",
"cmd/asm/internal/lex": "63257a72969a02057822819e8fed75ef2aa002c9",
"cmd/buildid": "0ab8e19c5c56c4855086c8d255e1687b66bef6fd",
"cmd/cgo": "4b59941170fd7a631e25bc78f333227eaa58c55d",
"cmd/compile": "cd9c4340cf31235f26c4ab86d781a677de15d6b3",
"cmd/compile/internal/amd64": "f5d9ea54224f60d732336e59488323da104991c1",
"cmd/compile/internal/arm": "5f1ff1c81494be5052213bd7695b425243166245",
"cmd/compile/internal/arm64": "131c72c92e7f733578ed12222fccf5e8c8d0ae8d",
"cmd/compile/internal/gc": "11707f4b859dd96e9682a662e53c48ef0dcc100e",
"cmd/compile/internal/mips": "2c8b4248c5b1c2496580a18fc9ccabce42acdfe3",
"cmd/compile/internal/mips64": "3391cdfc9090f81772125705f45758801de545d1",
"cmd/compile/internal/ppc64": "9fd3949463b3aa3275d7888a75ed0eaaacc4c5ea",
"cmd/compile/internal/s390x": "8e1f986fb7c7eb37204a0a6c24ee9f8c215069e6",
"cmd/compile/internal/ssa": "941e46d61666bfdfa54935849ce6651c1ab1990d",
"cmd/compile/internal/syntax": "0baac36d0fb02c19730d6a42be40570e575d71ff",
"cmd/compile/internal/test": "43884665a6fd92fd9c44b10b6b34d2449e07ff87",
"cmd/compile/internal/types": "970c0bf280a035b2fc2bdcfeae42e2ff49570cfe",
"cmd/compile/internal/x86": "bd7e3618546e42a8582eec3542c12e33d6b6994d",
"cmd/cover": "2e2ed5d4337e5cb7a5f6dd1efaa1834f28eb50eb",
"cmd/dist": "278c4f075f264c8f0cffa7b173f3a8398e446e07",
"cmd/doc": "bfe3aa051de37789da56a55ab56f570f025e3895",
"cmd/fix": "63840d5f73aecb77a1130aa35dc8bebf9f23db8c",
"cmd/go": "d0497b8e244a7cc29cf51d33da287762379d4cfb",
"cmd/go/internal/base": "0b901b60c419f219f3aa1ba9b8d4453523ef6cc7",
"cmd/go/internal/bug": "6beeac79b053c06f6dc48c198b9a5b7680b5d890",
"cmd/go/internal/cache": "fbef08fa0bf3c22661de5819b784bce83dae699b",
"cmd/go/internal/cfg": "5de6af85a85259f426f370823be7476b670c3ee8",
"cmd/go/internal/clean": "21722d4990db7b316ef8028f7eaa2847f7920c38",
"cmd/go/internal/cmdflag": "f90faa92ec4b6b363aedf4108609f9ada1902010",
"cmd/go/internal/doc": "df5087ff4e0deda8123fd912a350066ba2ef226a",
"cmd/go/internal/envcmd": "92dbd6076beb8e60c27a9a06c41b4492d1b558f9",
"cmd/go/internal/fix": "29e570b55a7e5545aeaa97ee5ead5ff6f6690a98",
"cmd/go/internal/fmtcmd": "14062cef705cc43ce9ce1d591bdbb8ab4bf74c68",
"cmd/go/internal/generate": "1d4d7657a462ad1e0763ed4d83a61088e454750d",
"cmd/go/internal/get": "600a7cfab7cf9301fadb123afe9d61c8ff34e504",
"cmd/go/internal/help": "cd95d8ba2a5d72bdc0bb83164e1f3c8fa153cbf2",
"cmd/go/internal/list": "726c434add44550bf070f9fe3f0adcc6a8c0073d",
"cmd/go/internal/load": "b88a6325a7c54db8feb09bdd45a1560a2ccbd832",
"cmd/go/internal/run": "2bad8a70c0e271800747077956b5a39a796418c8",
"cmd/go/internal/str": "fb03b8606797b6ba0901079358c8444f1c5224b9",
"cmd/go/internal/test": "91bf61c6d1b604e76b0ad3995bb0feb2a84f6a13",
"cmd/go/internal/tool": "4f5a42fdb05c068d85203d37e23d14a86d82417e",
"cmd/go/internal/version": "7555f2666522c4c5f87b6c4c71bf39786864c24c",
"cmd/go/internal/vet": "4342628642a15911e1e6eceb4bc47785f0539241",
"cmd/go/internal/web": "0c27142b978c54e0f67079c0ea7d70f7675957d2",
"cmd/go/internal/work": "4e1df47b63309a9e5b2aa1300b4812f1f79585cd",
"cmd/gofmt": "6c5294464e582dd6f9481ae0eec64dd7fdb0da08",
"cmd/internal/bio": "1327fa774881dffd0ef8487f22ee94446c1e7e29",
"cmd/internal/browser": "f63d7d232a64d36e0d661cb4111fed3271c6e29b",
"cmd/internal/buildid": "59a66fee345a1b5ad7e3d4aed5f407f1c0b11199",
"cmd/internal/dwarf": "9359c64cc220c838ced541489e89f196e16bd2b1",
"cmd/internal/edit": "8b6767a16012dac7653d30e65f53f44a87165f7a",
"cmd/internal/gcprog": "8f14249401944d83d779c0686d05493fa30b6e8a",
"cmd/internal/goobj": "ba72b3ef2eafec511c19f5f909ba5a4b3483eee6",
"cmd/internal/obj": "5946fdf1133acaa0e8a54e40f0e73f93a5a9fb24",
"cmd/internal/obj/arm": "5862108885f1c92b4dfa745b27fd64adf7625a9c",
"cmd/internal/obj/arm64": "d103fb81d05a3049d0ffb844b20262f8db2b6fbd",
"cmd/internal/obj/mips": "1dce298c90dd344d834e58dcca6083cb62a8dc37",
"cmd/internal/obj/ppc64": "106f693ae2a228551fe78991bea7dce0939f887e",
"cmd/internal/obj/s390x": "d6772c65ac719dc6eaa3563f84d36e130c23490f",
"cmd/internal/obj/x86": "8f7b86cdf59e5e6042a7efcc8b23393f3c2a8490",
"cmd/internal/objabi": "3088cb6b9d1320c6b960cba6ab366c7062575c3f",
"cmd/internal/objfile": "55a07ab050e0aa7fd1e22636496674de55559e6d",
"cmd/internal/src": "d3d0ce6711a947d51f83b11754766ffc34c14572",
"cmd/internal/sys": "1040e630813349905a248583ccdcd343067d8c7b",
"cmd/internal/test2json": "fbc7f16fb2019bfba5cb77f40d1ed6986364ec27",
"cmd/link": "40b75131f57621dbb5ff914ffb60ddde45bea8e2",
"cmd/link/internal/amd64": "8635128e0240be42c0cae6ee6a4cc9f891571642",
"cmd/link/internal/arm": "23951653400195d054a549f244ae4c2547ae1ae5",
"cmd/link/internal/arm64": "3e555c1b3fba7fe259100dd15ac39a0193df2c7b",
"cmd/link/internal/ld": "055ea86a3b81b8e75643e22af23798877646df7d",
"cmd/link/internal/loadelf": "6f6be8070f314c3f737cc104b0508b0e528f2dde",
"cmd/link/internal/loadmacho": "b85a7cfad033b0fa6467280243a8d4e4f04bd0db",
"cmd/link/internal/loadpe": "e2ccda659afe2f0394710f466f66bdd579c54208",
"cmd/link/internal/mips": "12e4ada9c6791605194f04023a345fdd72370cf5",
"cmd/link/internal/mips64": "99d4d926a5e07311fed1cb637ab5179f652e2988",
"cmd/link/internal/objfile": "9baae95deab8676f1efb226f168ad684e2b64b6d",
"cmd/link/internal/ppc64": "6e8ea57c52ad09baeb3c48fd13b9d9d840074184",
"cmd/link/internal/s390x": "49eb93b9ed182f65ba604f6868242adf279c2838",
"cmd/link/internal/sym": "d710f38d05dc0f46c324187034a9933eaa87b9a8",
"cmd/link/internal/x86": "6cf577f20d4e718e722bcc148119485dac5d4d22",
"cmd/nm": "fcc0279301271b19fb4a43730b7ace0a13f593b4",
"cmd/objdump": "d39a448b93ebafe5a9198426909deb11e304a39e",
"cmd/pack": "47b1d6349f0e1a5bc603cf03f771dc3ec5ef8e3a",
"cmd/pprof": "b4823792338f951c3aabacc657a5d12b3c6e3114",
"cmd/test2json": "13e42b6cbad776b717a02377950ddbfd0612fe01",
"cmd/trace": "24ac93cafa9533eeffffd9b22fbe3bb09b1ca5a3",
"cmd/vet": "04b858ebb82cacf59ce16d757409ca9eb3f7a85b",
"cmd/vet/internal/cfg": "6d94a2fcaf5af5715e4d51903e1df89f90d20610",
"cmd/vet/internal/whitelist": "84e247851dda6b4a2a7b6bf859e57c0964e4eba0",
"compress/bzip2": "14b755d13f478f3620837efb68923fd0b813d16c",
"compress/flate": "44ad5df52517ad7c4f3554880f20a2c497c81d6e",
"compress/gzip": "da2136e141b54d003dd733b64dbaf88043fe1cc1",
"compress/lzw": "4246d3d28c9d0c8bd383ebf70a06b9b5aa3a0908",
"compress/zlib": "0881ae885c64db0c8ef2a82ff1c5ff5d94ba6007",
"container/heap": "2698061c14691927ded95164c3d800256871f2f7",
"container/list": "a0786604da09c84a42e71b72b4f10227dbaeba5b",
"container/ring": "7b4dc0a362c4b9f78474c3feb0edb733d08307dd",
"context": "83acce6b568fcfc990f3172d005b742874cc107d",
"crypto": "5d899bb007b2f094ec98f2f34470e724d4af9b22",
"crypto/aes": "9bbcbaf8a59cd3db1bcf8fc5e3424bf225ff68e0",
"crypto/cipher": "5e8288bf5b22ed3d215da04eddd6581400a052fa",
"crypto/des": "027fb7422523858e7fa61f961901d23f843311b8",
"crypto/dsa": "14388cd533f5a4d9206b0cfa0c7822e139326daa",
"crypto/ecdsa": "c1a180288447d55d56907168099ad4bbd44db0ff",
"crypto/elliptic": "0b202a7430f6e5195138c60152a0483224cbf7c7",
"crypto/hmac": "0d414ebc2bd9682c291aefec4a561ac66cd3bc6d",
"crypto/internal/cipherhw": "c4f4420e454b384fe9043dad79ef49c81b247a46",
"crypto/md5": "da312c9335796264f28077714a04b2e967599554",
"crypto/rand": "65d3b0f3dd132340aeb23e2f4181dcaac848aa1a",
"crypto/rc4": "ad1f46a9dafd1385206f8c5f62675ed5b0e8f4a3",
"crypto/rsa": "e75bf8a8b29eb153b12d33c249effa14599297df",
"crypto/sha1": "dccddcf23c8803452d7a4ce7e75db355f42995e0",
"crypto/sha256": "34264bbb223410ea57fdfee9bcccaee0c82402a1",
"crypto/sha512": "0cc15df431c27be746db6758e85383bf89248134",
"crypto/subtle": "f44c91eafcb8d9c647f289047a7c3bd269a4520c",
"crypto/tls": "e8d9082a48d23ff657afc01bda6885484a73c12d",
"crypto/x509": "cdc4bc77c8d1f4b71f24e756059d4aa8ff829581",
"crypto/x509/pkix": "62332f4e6bd7f99cec334c3da1406b01709cf743",
"database/sql": "db827fdc1257a38af1f51dfadc43a1994c743763",
"database/sql/driver": "6ab4180904c5660b2108e87e1fc0ef756d9470b6",
"debug/dwarf": "320ca24a70cea1c46ba80a660519059c7a071a6e",
"debug/elf": "e78bfb08b9a257714de4203999a5d3be0e0c1b63",
"debug/gosym": "6edebb5048171ef44e020cb8576ee1c89b4931d7",
"debug/macho": "e3b669a9cf723b261aa017586ec7fd3f3a53b3d1",
"debug/pe": "f292145cb6ac5d4a6f36c48d9a818d5c3d63b578",
"debug/plan9obj": "5c47f5ab91fd80075c93cca799a3495598266a9e",
"encoding": "415fc2b9d9a773a6b2cfeac876ab3e1dd3fc82f3",
"encoding/ascii85": "d95edb12de6a80133e6d7c23980e7bf531ffa326",
"encoding/asn1": "887607f6e28b65b35829bcb9a10b242ed1bc432f",
"encoding/base32": "c0d6c798b53d1416073cb2fe7e09f4d123f39c54",
"encoding/base64": "f3da6928ba01200e61a372d0a83498b817927ff1",
"encoding/binary": "7f31051d0aee488777bf2ed93992ee26d83caa9c",
"encoding/csv": "a1631dd7746aaff8b155d325e8e79856a09ce07c",
"encoding/gob": "ae9bd85d3db2b2b4b996c03a4451095ab6d8ae1a",
"encoding/hex": "72ba0a067cb8cb6702fbc45f853aeaf3a1687aa7",
"encoding/json": "2a43b7ce1c4c359bcb463ecc4e1d911d9a8b1cb2",
"encoding/pem": "bb8170f7fc68244ca34155ab338f460b57adc759",
"encoding/xml": "d68f0851f6f4cdba81498ae59cf287b86717368a",
"errors": "0ed7effb5355027f184e75dd0870c0e3bf12e496",
"expvar": "eb4e8ed5be6eec450663160232d71c2359638792",
"flag": "d22340ece91cbe74d65092934f829bd29739c0e9",
"fmt": "8845ae73100f45a34a45a61be592d0c24f3b8dc8",
"go/ast": "b7c368b75c14eaa115f519a5c12e8852555dd150",
"go/build": "0f8a6272ddb9baa281d29c937879a9dcd9a52b7d",
"go/constant": "5299fd8fc0da218395d0b08c07efccd9ee7ebf56",
"go/doc": "9401bff4d1f2b7b95377bb47b45c34496dd7c7ed",
"go/format": "12eb7f3cd122bef27959f998b3900e31f2a333e7",
"go/importer": "0b58a35e140bddfb6b124de4e64ce1f05382fb81",
"go/internal/gccgoimporter": "e2a82441f666f41f85fa8d19f5f45ac34ca0a96f",
"go/internal/gcimporter": "5bc9ddd442bdb25e2c40be48ef16cfd51f38050b",
"go/internal/srcimporter": "0740ba483f598cb1cfe47e1a704e2d31cfd52fef",
"go/parser": "33d621dd14dd18c087a2b67cec9a563742e676eb",
"go/printer": "7b86910fd26f193b07b2fa73e3d82dc057e61f09",
"go/scanner": "87bab94b60dfbafa0cb308df1c06960d73050d46",
"go/token": "3276595ca025ab006a504536a3f71fb272bf38cc",
"go/types": "5c23b36d7e3a8137c02f9efeb25e1bcc552abbc0",
"hash": "120a0e86158728a311e29cd59459829c60b2e69d",
"hash/adler32": "a0a4e35d2a8fea6de62fbe373db24671b36f3df0",
"hash/crc32": "ebc22a83a526b1434c7dc45669735a108f70ad2a",
"hash/crc64": "fb7ffe1c5efce45f3ff8dd598f49db94dbab4e8f",
"hash/fnv": "163d2027e8d292f092979e9457230b58376bc29c",
"html": "bd99208843be905728b1edb424cbee8099d28559",
"html/template": "69e171797015cbba3d5619672fb80e4cf5ccfccc",
"image": "2b3c47c094042bb750ae4ce378001ed33b027328",
"image/color": "a8ca152bbfcf7535f3bba831874941321ce31f84",
"image/color/palette": "88adf3330bca031bcd4d573482654788cde7eb73",
"image/draw": "ab6e3c04e6b41aa17268902ecd457faba39b1022",
"image/gif": "d197122c65272f440e5321865d711feaeef2816e",
"image/internal/imageutil": "925f66b868a2b7cc058bcd9d941551bc51e42981",
"image/jpeg": "43c59fc43fdefdbb26f1b348987072ecbf406ba0",
"image/png": "affd58f47204ed0e01eea7bc074ad3a8684b47f0",
"index/suffixarray": "6b2417f1ae9d72d2171f5016de9e5dac26f6518d",
"internal/nettrace": "49b2927d7c96878aad5c6b3e1fa9b658e49c1f66",
"internal/poll": "91e5738148f21749cd72001ff793c343f1d91a9b",
"internal/race": "e874f96524f19c01991682fbbd534857c8e33d29",
"internal/singleflight": "e64edee5037f3c924abfc6f416f0f61d32507718",
"internal/syscall/windows": "add264fe24511441db37ce44514e88aff41c3226",
"internal/syscall/windows/registry": "6e034a9ef4779b2a6e4c0b9345b3bcbd866e536a",
"internal/syscall/windows/sysdll": "fea1910491cb1c91743db4b991d72238b9df51a9",
"internal/testenv": "98df772fa46c5f14f0091c17fe8027016a45da48",
"internal/testlog": "5e699db5f89a5d6b701ac42427f5950c3e4b0569",
"internal/trace": "da3b81eb4706bf79ad259c5886acb79535479cd5",
"io": "f423a1807f3b40209b461b5dc17b050107d5c2d5",
"io/ioutil": "a053733680456ee4b045d05f820b02adef8dc619",
"log": "bb287a57aafd078de0c8a3ff880dc455239f0cc4",
"log/syslog": "94d63e9d5b580d0e82b38e4d86981e116f84840a",
"math": "48fe4f989c2a39d8f3b9abf06c7f6ffea92a56b2",
"math/big": "f9cf132d27a5ac6769d25ac00db4ca6ec6604b62",
"math/bits": "4747eff69c7c5bff78dbafc6e8edb68c442b6c8e",
"math/cmplx": "c307ef2dc8737aa41e91158f35f7d0f163cccf05",
"math/rand": "4495ad9f2e61bb1f02e6347923b627350529b2f7",
"mime": "f85d43115f960c97bb6e590db8015305ba93faca",
"mime/multipart": "efb04f4c0a01e010cf67b04fecbe0b0663597618",
"mime/quotedprintable": "52c35180d6b397bf9a42ac6d5fdea73d1ae4b775",
"net": "375f4b17d414f17c07c2c669cf95a2e3247bf1c5",
"net/http": "de1b542e8ec0c9395003ac74a51cdd1ce68c82a0",
"net/http/cgi": "7d9a8037d075f17165bd1c36ab22d483fa8b3918",
"net/http/cookiejar": "f4d6251151539be4db7bfc4f5005be4ac53f0572",
"net/http/fcgi": "ff5f6f00282a7cbdd52f157ab817bbc1d2bc4053",
"net/http/httptest": "51c9c338b49089594f54d2c749785104ca569c2c",
"net/http/httptrace": "8649a80c53a15ff4c38bc4526c8003884760538b",
"net/http/httputil": "a015034821be57ac6048e81ef6a29d1a7c176e8d",
"net/http/internal": "c295297dd11962d199002fb3dd45940585f59072",
"net/internal/socktest": "3f1877c6a4ddc3065aaa0347b826c6cb3d966df3",
"net/mail": "fd621b24db1f5c932a28932529920243c3f34f24",
"net/rpc": "08ca2b5dd8eeb119b070e03a7e71fb247da27f39",
"net/rpc/jsonrpc": "14704235728b176b628d314ca3acf6e0b0a8c1a8",
"net/smtp": "c9ca61733682e03ee584f0e3d8c0b34dd85b0548",
"net/textproto": "ab8f321dbcaa9ca244d2bd0f58a5688319038b58",
"net/url": "c2f7a2a13a47882c49bbc1abb652c9dfbcf4a813",
"os": "0f9fbc4e73c99c41e195a60ed7304ccd2ab7d9ba",
"os/exec": "fef4dfbcc5ae28a702cff5ed1923aa25a51556c0",
"os/signal": "67678bcba5a875e7046e188b078a366f95b6f7ec",
"os/user": "46624d908299c9ffcb901a71d36838ae98065cc8",
"path": "32616f9b09406d7d1859203dfba4414a7718c017",
"path/filepath": "866ffd4a1ec78ab26b12063b21cd5b3daf32da65",
"reflect": "797f711384742fb12ba0dee13718b16a78f2a0dc",
"regexp": "167f26b4435ec1fb9b278c5e0fb58369c0113065",
"regexp/syntax": "776829a63cc75d6850fce14fdc468b083dec695b",
"runtime": "4c52130a53ff145103149e371dfe3395f6733d81",
"runtime/debug": "459760c61d12afecf51e46b4dd73f44d3dffd459",
"runtime/internal/atomic": "26cbece60a0bba5872594de0a51deea8b122784a",
"runtime/internal/sys": "c78985091482ec3bf7406df7e9a76dc63dba4c13",
"runtime/pprof": "6129f90bb1aec5491ea4423c6cfe7f0fd88f8b7b",
"runtime/pprof/internal/profile": "f0979e5efc52777aa6c559ee65359af450cc2dfe",
"runtime/race": "fa09b50a9ac163964ca4160e9ea2c0454f865298",
"runtime/trace": "186eb273c04e094be94ce1ffec2c0d09395767ac",
"sort": "ebbcba118f5a68d8b5042c5cd97679ece38f86ac",
"strconv": "487aae7af1ed80ff20d9822bf1f6aefc476a8c0e",
"strings": "f2b3805f7f65bdce63db1345b65a323785e076c9",
"sync": "e6bf8ef4dceab04e6cdd2915287d8e534daedcc5",
"sync/atomic": "743b8165f41a27e4967678d2ccf4a0216a48b1e3",
"syscall": "013a2c031bfd18fe3b7198d83dad80184679f168",
"testing": "fedbd14559ab16014c4e5f50fe05a98ab1e3a370",
"testing/internal/testdeps": "cd107e90ebfbdd07b93f5df6a94826e6fa4a5831",
"testing/iotest": "e26af1efad6020bb0c41c17b299cdc7cd21ab300",
"testing/quick": "7869be9f9d5a53ae80c7febeaa73c6d3685b5a1f",
"text/scanner": "2876f6e377ef9272e5fe02d8ef5326e50e2b52c1",
"text/tabwriter": "393d58f6e5188153adfbcd84cb7ee100644b6859",
"text/template": "a2a4c1efbdf9c959e1e4227eb7d2e3fb61a990ec",
"text/template/parse": "e43a0cb894c746ec1a735c96bc7d834fd0e143e3",
"time": "270ceed46d05f6fbd1f9e4642482bf7261bdb367",
"unicode": "48df271f8ef77d0cd7a9497a35b46c22e93566e2",
"unicode/utf16": "3e38be1cc05e5a48e063af3b5b600700f592214b",
"unicode/utf8": "821f302b633f7fc66a82f9aa01e0a2786cfe9872",
"unsafe": "735d455ece4348f1178b766a5b01d9fe41097368",
}


@ -1,6 +0,0 @@
package std
var Prelude = map[bool]string{
false: "2f55d1e8cd60b3405002e438e9fbfd71293e089f",
true: "9fa5354baf5360cd05ba2ef218170de88191c7ee",
}


@ -1,260 +0,0 @@
package std
var Source = map[string]string{
"archive/tar": "ccc410879ab564f0cdb73c29ca3c886ec05c7bd5",
"archive/zip": "643782f191e87707832b0123580c2278d255cd07",
"bufio": "5e659edd24debe6e1605fe1ea5bea6a8d3383a76",
"bytes": "2e77c497d5f4c2f8e2cc1477edab99144662590c",
"cmd/addr2line": "7b5b74b8ae081e8f1730690374fce0be126d6322",
"cmd/api": "b07dd94d65d1a0b986e3dbf5a634b6851ba71cb7",
"cmd/asm": "8288ff512e3750238111f2ee196aec7a3417445d",
"cmd/asm/internal/arch": "100e1d75d879a6d5e2597d9f1e0f01d0534c40ac",
"cmd/asm/internal/asm": "34f2fd71b22230439d115616c5f52142eea85a60",
"cmd/asm/internal/flags": "f3ee16aa4481b46f0d59c3720e421d2bb4f36698",
"cmd/asm/internal/lex": "ee7d9289f9132c62a0504b657bb8020e2a60dfdf",
"cmd/buildid": "c89976450be818bd861b30f729322bd3b77cd96f",
"cmd/cgo": "b13c13884097ec8a4bb12922d99badfb2e72f358",
"cmd/compile": "35f45cfd3493dd5b0d60afb3aa248713f58d02c2",
"cmd/compile/internal/amd64": "2019840f88684aa790fd368fc5f860f02abd54db",
"cmd/compile/internal/arm": "bc5e3cfb24509e209ec6ebd8132ab89dbef3571c",
"cmd/compile/internal/arm64": "d71ab7e1538ce8d7acf00ca3215e2c9c3a579164",
"cmd/compile/internal/gc": "c0401b25520538bd4b49420399c82c415215ca91",
"cmd/compile/internal/mips": "7335540b70dd2ebc868937b35bc51ac79fda014e",
"cmd/compile/internal/mips64": "fba107903be3a2af7e4e696908513360e3c5e7ff",
"cmd/compile/internal/ppc64": "0a8046b67e8f719406eaf8ec5bfe505e3797d99f",
"cmd/compile/internal/s390x": "5ac9e84f9383f8349e67b542bf600b9ae55e494f",
"cmd/compile/internal/ssa": "979ac0387d8c936f20eede662f7496c7ef74a230",
"cmd/compile/internal/syntax": "a2a63115864183969875decd4cf67470ecfd00bb",
"cmd/compile/internal/test": "0e88cb01072819d2886dccd61c3270b56790dcae",
"cmd/compile/internal/types": "56cebfa6dd70ed50768bebd4e0a183552ea09c7e",
"cmd/compile/internal/x86": "b4fa96c003514e4bdd532bcfc6dd239c32b0bb67",
"cmd/cover": "7907e8f1ff26c943db73f7d011e2519121505a00",
"cmd/dist": "930ea877211b27862426b307c3856aa2d6f6fe6d",
"cmd/doc": "14e62886eb9450f76a30ef8dee974bf980653555",
"cmd/fix": "3f395bf0d2c2ab97d4240813464a7767dbdb42eb",
"cmd/go": "2dffa35e470844f6432ba4a30871a16cd69d8a70",
"cmd/go/internal/base": "072aee9e73b917245139025ba6f640797940144c",
"cmd/go/internal/bug": "8eb8fdf42f14505a9545d54eb6d8a6b28f86a0a1",
"cmd/go/internal/cache": "f26994401f3a6d67a745d7b5b40c8bbc95a9996a",
"cmd/go/internal/cfg": "3cc3555e0d78d160b1515f5b82a763a0327a0067",
"cmd/go/internal/clean": "2f12e8b25fd4119fb22f819e30aa7c45f4118205",
"cmd/go/internal/cmdflag": "d1653c20931e50cea9a1ba274fa191ec9ee17838",
"cmd/go/internal/doc": "8840cfa6e2bc767aae33fbf4b080feab619c1422",
"cmd/go/internal/envcmd": "3aaa0d382dd2bd5c5e8ef22c37e35dba18d8bec8",
"cmd/go/internal/fix": "d424078ea47526dc9d02c7fa32afb27f9f38f68a",
"cmd/go/internal/fmtcmd": "17f2443827a3a2aa5134a0bc78c96d85ffaf040e",
"cmd/go/internal/generate": "234aa9aa89cd04be818adf37aa255818895e5718",
"cmd/go/internal/get": "5bac3e8dca56263b6da189f3f891ad8a79b94e3d",
"cmd/go/internal/help": "edb7d94df6542664fc3a3ab22bb34d80ab5aa7ca",
"cmd/go/internal/list": "f6816f79ec83fd31b7b1db2fcf387cd07f75d5ec",
"cmd/go/internal/load": "1cc1045b041cca8f7af7775010f8c385b9ab51e2",
"cmd/go/internal/run": "b35aa893cb0df5c0cc4414bad81f5a3b97da4da9",
"cmd/go/internal/str": "fb18aa51a195dacfd6f507f1597526c71226403c",
"cmd/go/internal/test": "08907aa3e61f69ead134034174f9a46834844dff",
"cmd/go/internal/tool": "92b25138cef86bfc84bb006550213328d111910b",
"cmd/go/internal/version": "66b1014254f0fb76e06395d7d7ffe4baae9705a5",
"cmd/go/internal/vet": "e94e4a8e900276d7d3bb26d1049524437340589d",
"cmd/go/internal/web": "91ea44c8f05da4253f1e4046e157bf8f10bfe79c",
"cmd/go/internal/work": "cc12d63c5dfa257b8603b50754069930ff3307c0",
"cmd/gofmt": "5c6baeb0fed28e0da27f6b934184215d37888a7a",
"cmd/internal/bio": "3b2ffbb8862421250c922c5eaca61880b07d3b64",
"cmd/internal/browser": "57793ec758038f07d7700ffb8be9b3544d34e35b",
"cmd/internal/buildid": "c9a2ff5df5e1337d667ac44d97b0b981102d575b",
"cmd/internal/dwarf": "be4e9455437b65a1fa29a3aa1f6dad9230d1b6c0",
"cmd/internal/edit": "afe712248e87abb49922eab361a8b461b68758ca",
"cmd/internal/gcprog": "de85860e0a300073787526b65771a54f748aec7b",
"cmd/internal/goobj": "bbe133deecfd6b945adc63b55eddea9d80dcaa6f",
"cmd/internal/obj": "39a83e3490b3c8e1f2050ee7e953242e7c78d39e",
"cmd/internal/obj/arm": "ba8d151c30024260585478d1561dc1103050b91b",
"cmd/internal/obj/arm64": "bafc91ebb4ff648edf74e37c3d4558a3f3847d91",
"cmd/internal/obj/mips": "82243f5266cdb2bcb070c8060b093869c6f6218b",
"cmd/internal/obj/ppc64": "071779c4637d125b8f6af09797d75b984a2ae1ca",
"cmd/internal/obj/s390x": "e0ab0355766bba46a1e8804f2708d6a652401c29",
"cmd/internal/obj/x86": "880d8a1b1cf3a3c648759af439569e904c4832d5",
"cmd/internal/objabi": "6e8d6f93703def228336d684c1d79c139f326643",
"cmd/internal/objfile": "3d4b3fac3b56e297be46ba153a045a9f81023bdc",
"cmd/internal/src": "2c090c720389d4628ffd3aa921d4b53ab4ab9d2a",
"cmd/internal/sys": "a955213ba9e97d4b7b0bc6bf91871b512c1ea58f",
"cmd/internal/test2json": "c7967e20bec8dc78ff3114d0ef3addc71e548899",
"cmd/link": "c76b6ad9974becb6423538a3932a0fd4597babb7",
"cmd/link/internal/amd64": "ce71b0141291ec64958b1917fbbfd6fd284fb250",
"cmd/link/internal/arm": "37a0ea8a1b5a280a47f43d029229f9d576432480",
"cmd/link/internal/arm64": "955d4fbb066b9a6b6d0fb4e482397b343b49105f",
"cmd/link/internal/ld": "c3bf1d67bc95d6bf8021c42dbd9af9e82268f572",
"cmd/link/internal/loadelf": "4b3d1e9c28cd3103336f97df72e24a0caa6be436",
"cmd/link/internal/loadmacho": "cf7e22719c406bf76fa785f7d12ed3f4cf375f9c",
"cmd/link/internal/loadpe": "4f17bde03f1a203adfd41efe4381dabb3bfe608e",
"cmd/link/internal/mips": "fbccee6733b653a479b581e4277a4bbfe49b7987",
"cmd/link/internal/mips64": "df940e525cb5f7893ea2d17e9caf6c656e77d5e8",
"cmd/link/internal/objfile": "2a4306cf8cbb1fd5202d1578abd8d8f104bf7742",
"cmd/link/internal/ppc64": "5840eeceb28a2b09e9d12fe36a4dac384e2fb096",
"cmd/link/internal/s390x": "6c3eab29adfc5f6813874ad08c36ef322313b338",
"cmd/link/internal/sym": "b56da7b879bdee6bbba68a2c2035d9a128f6cff1",
"cmd/link/internal/x86": "a7680162f6a2509ea432b049cedb95715e28cdaa",
"cmd/nm": "53e4f5681b9bd383f05c7d51cf39be7a7875274f",
"cmd/objdump": "9b4248cb0d3eaac4baabe4266d6ee9ea9d8d7099",
"cmd/pack": "c83bbda9671805d91e13dd8fea33e572de4a9a84",
"cmd/pprof": "83003b5a46c20630ee726e608a4530edecd01d8a",
"cmd/test2json": "9c75eee5256b1fd688e0f615d57e9228eca917f6",
"cmd/trace": "98d294ce16acf171a434a6cfc754b113e84c02e3",
"cmd/vet": "d4de3ac6ffcd1f92d876c2ee65af4e63ecadbbd2",
"cmd/vet/internal/cfg": "a129af3a08823e806b67fe02f74fb23b61154ed5",
"cmd/vet/internal/whitelist": "332b410ddb1ff11c5715407256f63fffcf9c82a0",
"compress/bzip2": "8f3e40fb446c409868f46919e8c65664a38a5bbb",
"compress/flate": "39069902aed58c7a09ff01d5b50c12f41fecca4b",
"compress/gzip": "cca6fda0fffda7523d6a20d5dcba21c53bde3356",
"compress/lzw": "3e38fc4c673c6ad507d10f8664103b165fadf69e",
"compress/zlib": "c9d38327e41c20060efdb32a44316d21ad8bdbb8",
"container/heap": "23ed7ff89b243b5f9c39a01094ffff9a11447245",
"container/list": "fe581ebdd09b1acb7c36c69b808cd894770cc6e5",
"container/ring": "026bd1a27ed2036958e84d0e39ec6ad7a1b4177f",
"context": "04e09e44401b4423ca456c1dd86f6652f37d0df2",
"crypto": "9fca973f48740f93f0b8193692367d91d7160c11",
"crypto/aes": "82f2958b3499efc5b2aaf2d9cb61390c8a3ee9a8",
"crypto/cipher": "41b99acbd4e01834e0148c0dc722b0cb7fd5d539",
"crypto/des": "d07b5eae49fcc00548957f6670c96715a5357442",
"crypto/dsa": "bb6e61e5d0ed7f12524fef8a39d77321f7eb1f32",
"crypto/ecdsa": "e4589f115ba87914d0207e9b38dba68a4e117e96",
"crypto/elliptic": "588fa3b66864eff7a46a6ebe350666c011c3d44b",
"crypto/hmac": "4f2534230f9c601ec6d84d48be5a529d6052afb6",
"crypto/internal/cipherhw": "96120000f98d8157bee5b0979370dae292fd8f32",
"crypto/md5": "b2b2d5487edfa8912532b831db22429ab7511b66",
"crypto/rand": "44560b0ae5b88d631eb0cb197d336c4644de012f",
"crypto/rc4": "ad3bdb64b3985f5eaa9a12003831596191a91dea",
"crypto/rsa": "5a09162594e471fa116d70c5048299b006e71591",
"crypto/sha1": "41b0f2d0a6990da730dd481492f987fb4a65b0ae",
"crypto/sha256": "ed9482680becf3a2514c4295064ddaa3983285c4",
"crypto/sha512": "08d5d9b05d62305743791e0849d2fcefe09ab37c",
"crypto/subtle": "1f6c65fec63959f815f30c241a1d1e366eb2470f",
"crypto/tls": "396dc44cbacfa2b76edcc5f92900580ce57278f3",
"crypto/x509": "528964728ef2740aef6f39b59259b7d3ee2ba20c",
"crypto/x509/pkix": "6763d431044ba402c50bf158f95e58f78a77f458",
"database/sql": "10c2f65c0fa79b7557d1bc54e23ce3f12d730ef7",
"database/sql/driver": "42479b88966d04719b3bf2aa3a9fa82735571b99",
"debug/dwarf": "945b9d9bb59b065c7242fa501135110f47e58830",
"debug/elf": "0df0a3f833557b1620a52a5bc456d7800f15553c",
"debug/gosym": "969ebe46b87a5b381331df3aa9677b20c2e5ab53",
"debug/macho": "f27031e74561c815c7562656237d61b3a8f12b8d",
"debug/pe": "61ecfeea98dc1e56bfaab0f1fcde69f831871515",
"debug/plan9obj": "55a133ce4cd1bfef7dd251770f3b428b102e8a8d",
"encoding": "fd73d60677cd78191c9b4afc2c7611b77c3885ef",
"encoding/ascii85": "abc3fa5f4bf7f3af2215f99508ec60a1407cc31d",
"encoding/asn1": "44946adac76eb695d79a5af3d3056c9ddb3e54e2",
"encoding/base32": "821e54dde0eee994df08ee4a51c83e27173f87dd",
"encoding/base64": "5271d32186cd7eeeed9b4aaa13946be5c12dc269",
"encoding/binary": "70850e9a837090cc498c0d0370da5627b21d72be",
"encoding/csv": "567e96e95cfce97562e06b5e83c8c979a1f30042",
"encoding/gob": "d6b12188e4a7590766a5a749b71e459eaaff3d7d",
"encoding/hex": "8f648c63fa80537edcb6a3db95ca9cd6cda476b3",
"encoding/json": "c1dcccbb1cf158545af07e1eac75024cc53ba59f",
"encoding/pem": "23b030241fc269026194b609c8a7ceafb7320591",
"encoding/xml": "8bb9c67757ed88a8db0b643078ca4c787de02dac",
"errors": "4ca201947bfa9a6fab37643b194cdce9c1a3a293",
"expvar": "6488f49b7840b0c24845561f6c76edf5460eb8c1",
"flag": "304e361734c4e98b641596f0b759626352694c32",
"fmt": "47b59b906c9318ca6bb4c4b7ae7315aa147d846a",
"go/ast": "4eb28006c9bb0ddf9ffef4b2faa9bee54d42f71f",
"go/build": "a473a05de735a8e40817c55e2e1bfcd37fa5e569",
"go/constant": "1edc91255dcbada4a51f6e377447967386af9b08",
"go/doc": "8789735f1d7a365ce1384eea2b5e8e886015c08f",
"go/format": "7aa0af40cdb21007ef12d56c315e916b96e5f1b2",
"go/importer": "9e28303def4819452870e1fe863a50c0dfa8fd42",
"go/internal/gccgoimporter": "44d0f081fb2afbe069ea12fc719b981ae3dcee8e",
"go/internal/gcimporter": "2a5b1ce56138e358e63f5e9749fd07c0b8cc727b",
"go/internal/srcimporter": "8960da6c13f8b2a581bbdab19c5d0290e5f9ac0b",
"go/parser": "d68881c0d653f5bd96e717014b7b8bfc1bfcd08d",
"go/printer": "83c84309cb092cecfc339679ac666fed7df0911c",
"go/scanner": "0437e682c934ba10f1f3cc22d4210950e09c9ffc",
"go/token": "f11496105b60d51c0983cbecd65fbf616d5af0ac",
"go/types": "7771782cb47df77abb5887f07947b703f1b17ec4",
"hash": "57a5d9579cfb5818ce1ef1c22f17f42ec60ccc95",
"hash/adler32": "22bc3380ad903053a525e61107ad4e0d4faa66c1",
"hash/crc32": "8eae2da368c79fe563b9f077a8cbc22a1edfeb7d",
"hash/crc64": "d74bbe67282fdeae222758acd1d7461509b7e42b",
"hash/fnv": "210fc1ae2ac299b44c47fd799621df647220730e",
"html": "0e5ff9f699d204e268d3e3da99958d2bb1383783",
"html/template": "71d929f1e54b4f5f264647042015542cb9c96876",
"image": "9ff4ac804269aba78872fd75ee4c66ec9acdc0e9",
"image/color": "99b84024dd0e42389fe792705339475d4b639eac",
"image/color/palette": "a78544646a25daeb1173f66067275b3e128f6873",
"image/draw": "47f1249c360a17795570534ef85bd9f681e2705d",
"image/gif": "ab5d4655558804bbb9153f0080114581099e65b5",
"image/internal/imageutil": "eab8d946eae6372ca3e38dd06272b4976dc5d9bb",
"image/jpeg": "409bdc2f62f21863eee8bac66ea1b9e259bb0173",
"image/png": "690a05c289194f82f0e8f2dea7ef3f97e48d945a",
"index/suffixarray": "70a13e4fc0fa661ff3c1a5e227f1e04c65ccd744",
"internal/nettrace": "bfa72a1350f63e1b6a2a136d442e8c297baf866a",
"internal/poll": "4148afafebcc053fcab7cc82ec5c5d3511dd513b",
"internal/race": "43b2bd8226c1b1e802ba620126aff38606e1f9e2",
"internal/singleflight": "f43efb5e3367e81e04fd40d19c0887b9ea6a61fe",
"internal/syscall/windows": "d716bf11b81f535c9baf75436a04d197ed356695",
"internal/syscall/windows/registry": "810b8ca14595408db17ed38ed7552ca6f15b1132",
"internal/syscall/windows/sysdll": "ab231fa8df91852d1530f9f7dfffa394ea45a6ff",
"internal/testenv": "03fe5db87e51665ec55df0beaa76cfcc24f1cd91",
"internal/testlog": "3d7cbf7466e1a1d31eedf276814baaf24c824f67",
"internal/trace": "06e530436d871264953ba78193b1c1dc3e7490e3",
"io": "105843fa66276c950dcb9d40d25828f6c4a52d58",
"io/ioutil": "c814d8cbe83ff01544502d6cb0da5dda631507f0",
"log": "0b2c5c0f5b95202bf5883d03722b7d3586e74d26",
"log/syslog": "d5f313a5f1f55d6c6be4657272e3ee9736a83c81",
"math": "f7dbdb551b2358264c58b14dbb53e86f71a6e742",
"math/big": "4f94d42f2d863d6c84ce22e79b131465d86e49cd",
"math/bits": "52eca7b20019aa1ee1b863e2870ea36acfe90ce5",
"math/cmplx": "1d0d01bf318201ada488601df6135a8ea99cd9a5",
"math/rand": "abb4ec8a9399c60b39ba83dd26d532be074418d9",
"mime": "6501c87e25915296f1dde2a6c5f29709193ebeae",
"mime/multipart": "04eb6376deea968deacb6c6e9412b9e9731a9a5e",
"mime/quotedprintable": "d845a542bd5b1aabb67b1895141fc3ecb9dfd9b8",
"net": "f58b59277b44dd9e7b9a62f8aa932e9ec2ca6b57",
"net/http": "ea91e09098750b5aabea064734017f26986855f2",
"net/http/cgi": "3fdead89c15844110847d7dd1cf2764a2fdd9137",
"net/http/cookiejar": "b3c5c180d8ee36da21a20190bd002f701da20167",
"net/http/fcgi": "ee851e17c1ed055528df2a942a9d3a3241a5b3e6",
"net/http/httptest": "45789f4624b9156d2f1436ba4078e9aef47e6b04",
"net/http/httptrace": "a0aa1a87e4df19fea5d0681d64fa72912c642ffb",
"net/http/httputil": "42d8a884508d316fad41e05f36c0f4303763f410",
"net/http/internal": "4fdf5c058e11113c40ab17de4f4c5e67d1654d83",
"net/internal/socktest": "f001d73baefa9984403ecac54921aaad72d33978",
"net/mail": "be2ab9f037a6473025ac6b2fac1cab06973e97a3",
"net/rpc": "ae159837c01f37ba083511760063d6b52f971e20",
"net/rpc/jsonrpc": "51873ff2a371ddb9f1bc0185ae509e2b7978923d",
"net/smtp": "c0d652831155378d19eb075362c4c521418ef421",
"net/textproto": "063434b26a921295f23dcea4b8accd006e7fa786",
"net/url": "9d577cbd13b5b65fdb7296a67393d91467df6f9f",
"os": "5ae558a189f08fd91dd51864d0e55d849494720e",
"os/exec": "7c4cb41379b5c62800b3c121579c8fd4b0cec7a2",
"os/signal": "c4793c08e2bf17d4c123aa52e4c9e2a7cd368a89",
"os/user": "2dc102b8fa7617b00ac0bd035e4025a8d4209e6d",
"path": "1a0c50f9e1d05cb780a0480e25526b84545a6e2f",
"path/filepath": "6a69f2eabeba19cb5658c122b71cdf913c73ef90",
"reflect": "d8ef1f03568e260b370a61c2498aee726ebb5c67",
"regexp": "9eb5755968b4837f93fb604400da093ec82b1a86",
"regexp/syntax": "737deda762d3a46efdb4058b952ef062e6a15cb2",
"runtime": "7009e4a26e82a9d62043b7c9087ea36f862b7b7e",
"runtime/debug": "0e842735e9a9b38ea35e75febcfaaa4a5f7a61f4",
"runtime/internal/atomic": "bcb72da07b62aa4eb59ee1537e1c6921f65facc9",
"runtime/internal/sys": "c1843f187b5ae5c17e95e981a0901c1e85cd056f",
"runtime/pprof": "96913f5f8a0b81659e706fa852a7cbe9a4896be8",
"runtime/pprof/internal/profile": "0a641f0c0c03791863cea04ceb3b1c5ed12fa0a3",
"runtime/race": "22ee01048ca4a7e99a6b125b0fd9d4ebf0473a90",
"runtime/trace": "273633fe85b183b2dfb8a11118b6e4c9d3a0413c",
"sort": "b1a93a2abd9f289b6388b21b6f43f6898adf5d5a",
"strconv": "518ece4f7ed5d4a77b0770d4533b2c6517efc817",
"strings": "13e302986e73393f13486b94b17401b6115f0a30",
"sync": "249ae278c3d39f0f01772781fb9e62236df956f3",
"sync/atomic": "c2f59fd712f139b3e01983f06c2e57fc97d64e7f",
"syscall": "ae61f88c4213ec0d769af11358c206ab07a316ad",
"testing": "8a8354c7bf50684f23aef2810da344c72566b90f",
"testing/internal/testdeps": "a4a23520a9d7ff6cbd3ebaa147ec80aee536891e",
"testing/iotest": "e844db6b62b08f677ac696ca7fc1a3fd754d48d4",
"testing/quick": "44a58f3cae7131ac6ef82c47750804904f17e2e3",
"text/scanner": "8791c8b3d1759a2ee8e3b40d86da0cf3e25e88d6",
"text/tabwriter": "262949b17b2930092489e89fdca3a53c598a990c",
"text/template": "44f18a8087f1a51ecb1395d54f383bd84e38f46e",
"text/template/parse": "bdf0cfb3f000bcf4f2dc9ff0f618db9b1d483c5b",
"time": "2eefd7ef39c32f7a7e86c6293214c109394d4500",
"unicode": "c2721a6f1c6d636db283dae415f9824aafd7d2f5",
"unicode/utf16": "ef77f2b51ab7c06b67398888a6353c12478e90e5",
"unicode/utf8": "647282b3fe77fb6a3040724314d97f61997f5c6e",
"unsafe": "48c08f01e08a6c28b5aa7b4455a241a4c4d75d92",
}


@ -1 +0,0 @@
package std


@ -1,6 +0,0 @@
package std
var Wasm = map[bool]string{
false: "3912571228e6d97b1b91da16658e279128083f26",
true: "f0f70dfa57f349d021edfc57d8e5d1d993217e15",
}


@ -1,17 +0,0 @@
package servermsg
import "encoding/gob"
func RegisterTypes() {
gob.Register(Queueing{})
gob.Register(Error{})
}
type Queueing struct {
Position int
Done bool
}
type Error struct {
Message string
}


@ -1,103 +0,0 @@
package messages
import (
"bytes"
"compress/gzip"
"encoding/gob"
"github.com/dave/jsgo/server/servermsg"
"github.com/dave/services"
"github.com/dave/services/constor/constormsg"
"github.com/gorilla/websocket"
)
type Payload struct {
Message services.Message
}
func init() {
// Commands:
gob.Register(DeployQuery{})
// Data messages:
gob.Register(DeployQueryResponse{})
gob.Register(DeployFileKey{})
gob.Register(DeployFile{})
gob.Register(DeployPayload{})
gob.Register(DeployDone{})
gob.Register(DeployClientVersionNotSupported{})
// Initialise types in servermsg
servermsg.RegisterTypes()
// Initialise types in constormsg
constormsg.RegisterTypes()
}
// Client sends a DeployQuery with all offered files.
// Server responds with DeployQueryResponse, with all required files listed.
// Client sends a DeployFile for each required file.
type DeployQuery struct {
Version string
Files []DeployFileKey
}
type DeployQueryResponse struct {
Required []DeployFileKey
}
type DeployPayload struct {
Files []DeployFile
}
type DeployClientVersionNotSupported struct{}
type DeployFileKey struct {
Type DeployFileType
Hash string // sha1 hash of contents
}
type DeployFile struct {
DeployFileKey
Contents []byte // in the initial CommandDeploy, this is nil
}
type DeployDone struct{}
type DeployFileType string
const (
DeployFileTypeIndex DeployFileType = "index"
DeployFileTypeLoader = "loader"
DeployFileTypeWasm = "wasm"
)
func Marshal(in services.Message) ([]byte, int, error) {
p := Payload{in}
buf := &bytes.Buffer{}
gzw := gzip.NewWriter(buf)
if err := gob.NewEncoder(gzw).Encode(p); err != nil {
return nil, 0, err
}
if err := gzw.Close(); err != nil {
return nil, 0, err
}
return buf.Bytes(), websocket.BinaryMessage, nil
}
func Unmarshal(in []byte) (services.Message, error) {
var p Payload
gzr, err := gzip.NewReader(bytes.NewBuffer(in))
if err != nil {
return nil, err
}
if err := gob.NewDecoder(gzr).Decode(&p); err != nil {
return nil, err
}
if err := gzr.Close(); err != nil {
return nil, err
}
return p.Message, nil
}
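
A hypothetical round trip through the gob+gzip envelope above, illustrating the offer/response handshake described in the comments (the import path is the package shown here; the hash value is just an example):

```go
package main

import (
	"fmt"

	"github.com/dave/jsgo/server/wasm/messages"
)

func main() {
	// Client offers the files it could upload...
	query := messages.DeployQuery{
		Version: "1.0.0",
		Files: []messages.DeployFileKey{
			{Type: messages.DeployFileTypeWasm, Hash: "da39a3ee5e6b4b0d3255bfef95601890afd80709"},
		},
	}
	b, _, err := messages.Marshal(query)
	if err != nil {
		panic(err)
	}
	// ...and the same envelope is used to decode whatever comes back.
	out, err := messages.Unmarshal(b)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out.(messages.DeployQuery))
}
```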


@ -1,5 +0,0 @@
*.iml
.DS_Store
.idea/
coverage.out
.coverage/


@ -1,7 +0,0 @@
Copyright (c) 2018 David Brophy
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -1,24 +0,0 @@
Copyright (c) 2013 Richard Musiol. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,3 +0,0 @@
package services
type Message interface{}


@ -1,15 +0,0 @@
package constormsg
import "encoding/gob"
func RegisterTypes() {
gob.Register(Storing{})
}
type Storing struct {
Starting bool
Finished int
Unchanged int
Remain int
Done bool
}


@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,41 +0,0 @@
// +build !js
package services
import (
"context"
"io"
"cloud.google.com/go/datastore"
billy "gopkg.in/src-d/go-billy.v4"
)
// Fileserver provides the functionality to persist files and host them for web delivery. In production
// we use GCS buckets. In local storage mode we use a temporary directory
type Fileserver interface {
Write(ctx context.Context, bucket, name string, reader io.Reader, overwrite bool, contentType, cacheControl string) (saved bool, err error)
Read(ctx context.Context, bucket, name string, writer io.Writer) (found bool, err error)
Exists(ctx context.Context, bucket, name string) (bool, error)
}
// Database provides the functionality to persist and recall data. In production we use the gcs datastore.
// In local development mode we use a memory data store.
type Database interface {
Get(ctx context.Context, key *datastore.Key, dst interface{}) (err error)
Put(ctx context.Context, key *datastore.Key, src interface{}) (*datastore.Key, error)
GetMulti(ctx context.Context, keys []*datastore.Key, dst interface{}) (err error)
PutMulti(ctx context.Context, keys []*datastore.Key, src interface{}) (_ []*datastore.Key, err error)
}
// Resolver provides the functionality to resolve package paths to repo URLs. In production mode this
// uses the `go get` codebase. In local mode we scan the gopath.
type Resolver interface {
Resolve(ctx context.Context, path string) (url string, root string, err error)
}
// Fetcher fetches the worktree for a git repository. In production the repo is cloned or loaded from
// the persistence cache and fetched to ensure it's up to date. In local development mode the repo is loaded
// from the local GOPATH.
type Fetcher interface {
Fetch(ctx context.Context, url string) (billy.Filesystem, error)
}
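
For local development, an implementation of the Fileserver interface above can be as small as an in-memory map; this is a hypothetical sketch (names invented here, content type and cache control are simply ignored):

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

type memFileserver struct {
	files map[string][]byte // keyed by "bucket/name"
}

func (m *memFileserver) Write(ctx context.Context, bucket, name string, reader io.Reader, overwrite bool, contentType, cacheControl string) (bool, error) {
	key := bucket + "/" + name
	if _, ok := m.files[key]; ok && !overwrite {
		return false, nil
	}
	b, err := ioutil.ReadAll(reader)
	if err != nil {
		return false, err
	}
	m.files[key] = b
	return true, nil
}

func (m *memFileserver) Read(ctx context.Context, bucket, name string, writer io.Writer) (bool, error) {
	b, ok := m.files[bucket+"/"+name]
	if !ok {
		return false, nil
	}
	_, err := writer.Write(b)
	return true, err
}

func (m *memFileserver) Exists(ctx context.Context, bucket, name string) (bool, error) {
	_, ok := m.files[bucket+"/"+name]
	return ok, nil
}

func main() {
	fs := &memFileserver{files: map[string][]byte{}}
	fs.Write(context.Background(), "pkg", "out.wasm", strings.NewReader("binary"), true, "application/wasm", "no-cache")
	buf := &bytes.Buffer{}
	found, _ := fs.Read(context.Background(), "pkg", "out.wasm", buf)
	fmt.Println(found, buf.String()) // true binary
}
```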


@ -1,5 +0,0 @@
*.iml
.DS_Store
.idea/
coverage.out
.coverage/


@ -1,7 +0,0 @@
Copyright (c) 2018 David Brophy
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@ -1,56 +0,0 @@
<a href="https://patreon.com/davebrophy" title="Help with my hosting bills using Patreon"><img src="https://img.shields.io/badge/patreon-donate-yellow.svg" style="max-width:100%;"></a>
# wasmgo
The `wasmgo` command compiles Go to WASM and deploys to the [jsgo.io](https://github.com/dave/jsgo)
CDN.
### Install
`go get -u github.com/dave/wasmgo`
### Usage
```
Compiles Go to WASM and deploys to the jsgo.io CDN.
Usage:
wasmgo deploy [package] [flags]
Flags:
-b, --build string Build tags to pass to the go build command.
-c, --command string Name of the go command. (default "go")
-f, --flags string Flags to pass to the go build command.
-h, --help help for deploy
-i, --index string Specify the index page. (default "index.wasmgo.html")
-j, --json Return all template variables as a json blob from the deploy command.
-o, --open Open the page in a browser.
-t, --template string Template defining the output returned by the deploy command. Variables: Page (string), Loader (string). (default "{{ .Page }}")
-v, --verbose Show detailed status messages.
```
### Example
Here's a simple hello world:
```
wasmgo deploy github.com/dave/wasmgo/helloworld
```
### Index
You may specify a custom index page by including `index.wasmgo.html` in your project or by using the `index`
command line flag.
Your index page should look something like this:
```html
<html>
<head><meta charset="utf-8"></head>
<body>
<script src="{{ .Script }}"></script>
<script src="{{ .Loader }}"></script>
</body>
</html>
```

View File

@ -1,14 +0,0 @@
package cmdconfig
type Config struct {
Port int
Index string
Template string
Json bool
Verbose bool
Open bool
Command string
Flags string
BuildTags string
Path string
}

View File

@ -1,37 +0,0 @@
package cmd
import (
"fmt"
"os"
"github.com/dave/wasmgo/cmd/deployer"
"github.com/spf13/cobra"
)
func init() {
deployCmd.PersistentFlags().StringVarP(&global.Index, "index", "i", "index.wasmgo.html", "Specify the index page.")
deployCmd.PersistentFlags().BoolVarP(&global.Verbose, "verbose", "v", false, "Show detailed status messages.")
deployCmd.PersistentFlags().BoolVarP(&global.Open, "open", "o", false, "Open the page in a browser.")
deployCmd.PersistentFlags().StringVarP(&global.Command, "command", "c", "go", "Name of the go command.")
deployCmd.PersistentFlags().StringVarP(&global.Flags, "flags", "f", "", "Flags to pass to the go build command.")
deployCmd.PersistentFlags().StringVarP(&global.BuildTags, "build", "b", "", "Build tags to pass to the go build command.")
deployCmd.PersistentFlags().StringVarP(&global.Template, "template", "t", "{{ .Page }}", "Template defining the output returned by the deploy command. Variables: Page (string), Loader (string).")
deployCmd.PersistentFlags().BoolVarP(&global.Json, "json", "j", false, "Return all template variables as a json blob from the deploy command.")
rootCmd.AddCommand(deployCmd)
}
var deployCmd = &cobra.Command{
Use: "deploy [package]",
Short: "Compile and deploy",
Long: "Compiles Go to WASM and deploys to the jsgo.io CDN.",
Args: cobra.RangeArgs(0, 1),
Run: func(cmd *cobra.Command, args []string) {
if len(args) > 0 {
global.Path = args[0]
}
if err := deployer.Start(global); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
os.Exit(1)
}
},
}

View File

@ -1,342 +0,0 @@
package deployer
import (
"bytes"
"crypto/sha1"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"text/template"
"github.com/dave/jsgo/assets/std"
"github.com/dave/jsgo/server/servermsg"
"github.com/dave/jsgo/server/wasm/messages"
"github.com/dave/services/constor/constormsg"
"github.com/dave/wasmgo/cmd/cmdconfig"
"github.com/dave/wasmgo/config"
"github.com/gorilla/websocket"
"github.com/pkg/browser"
)
const CLIENT_VERSION = "1.0.0"
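// Start compiles the target package to WebAssembly, builds the loader and index
// files, and deploys all three to the jsgo.io CDN. Files are content-addressed by
// SHA1 hash: the server is queried with the hashes first, replies with the subset
// it does not already have, and only those files are uploaded before the deployed
// URLs are printed (or opened in a browser with -o).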
func Start(cfg *cmdconfig.Config) error {
var debug io.Writer
if cfg.Verbose {
debug = os.Stdout
} else {
debug = ioutil.Discard
}
// create a temp dir
tempDir, err := ioutil.TempDir("", "")
if err != nil {
return err
}
defer os.RemoveAll(tempDir)
fpath := filepath.Join(tempDir, "out.wasm")
sourceDir, err := runGoList(cfg)
if err != nil {
return err
}
if err := runGoBuild(cfg, fpath, debug); err != nil {
return err
}
binaryBytes, err := ioutil.ReadFile(fpath)
if err != nil {
return err
}
binarySha := sha1.New()
if _, err := io.Copy(binarySha, bytes.NewBuffer(binaryBytes)); err != nil {
return err
}
files := map[messages.DeployFileType]messages.DeployFile{}
files[messages.DeployFileTypeWasm] = messages.DeployFile{
DeployFileKey: messages.DeployFileKey{
Type: messages.DeployFileTypeWasm,
Hash: fmt.Sprintf("%x", binarySha.Sum(nil)),
},
Contents: binaryBytes,
}
loaderBuf := &bytes.Buffer{}
loaderSha := sha1.New()
wasmUrl := fmt.Sprintf("%s://%s/%x.wasm", config.Protocol[config.Pkg], config.Host[config.Pkg], binarySha.Sum(nil))
loaderVars := struct{ Binary string }{
Binary: wasmUrl,
}
if err := loaderTemplateMin.Execute(io.MultiWriter(loaderBuf, loaderSha), loaderVars); err != nil {
return err
}
files[messages.DeployFileTypeLoader] = messages.DeployFile{
DeployFileKey: messages.DeployFileKey{
Type: messages.DeployFileTypeLoader,
Hash: fmt.Sprintf("%x", loaderSha.Sum(nil)),
},
Contents: loaderBuf.Bytes(),
}
indexBuf := &bytes.Buffer{}
indexSha := sha1.New()
loaderUrl := fmt.Sprintf("%s://%s/%x.js", config.Protocol[config.Pkg], config.Host[config.Pkg], loaderSha.Sum(nil))
indexVars := struct{ Script, Loader string }{
Script: fmt.Sprintf("%s://%s/wasm_exec.%s.js", config.Protocol[config.Pkg], config.Host[config.Pkg], std.Wasm[true]),
Loader: loaderUrl,
}
indexTemplate := defaultIndexTemplate
if cfg.Index != "" {
indexFilename := cfg.Index
if cfg.Path != "" {
indexFilename = filepath.Join(sourceDir, cfg.Index)
}
indexTemplateBytes, err := ioutil.ReadFile(indexFilename)
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
indexTemplate, err = template.New("main").Parse(string(indexTemplateBytes))
if err != nil {
return err
}
}
}
if err := indexTemplate.Execute(io.MultiWriter(indexBuf, indexSha), indexVars); err != nil {
return err
}
files[messages.DeployFileTypeIndex] = messages.DeployFile{
DeployFileKey: messages.DeployFileKey{
Type: messages.DeployFileTypeIndex,
Hash: fmt.Sprintf("%x", indexSha.Sum(nil)),
},
Contents: indexBuf.Bytes(),
}
indexUrl := fmt.Sprintf("%s://%s/%x", config.Protocol[config.Index], config.Host[config.Index], indexSha.Sum(nil))
message := messages.DeployQuery{
Version: CLIENT_VERSION,
Files: []messages.DeployFileKey{
files[messages.DeployFileTypeWasm].DeployFileKey,
files[messages.DeployFileTypeIndex].DeployFileKey,
files[messages.DeployFileTypeLoader].DeployFileKey,
},
}
fmt.Fprintln(debug, "Querying server...")
protocol := "wss"
if config.Protocol[config.Wasm] == "http" {
protocol = "ws"
}
conn, _, err := websocket.DefaultDialer.Dial(
fmt.Sprintf("%s://%s/_wasm/", protocol, config.Host[config.Wasm]),
http.Header{"Origin": []string{fmt.Sprintf("%s://%s/", config.Protocol[config.Wasm], config.Host[config.Wasm])}},
)
if err != nil {
return err
}
messageBytes, messageType, err := messages.Marshal(message)
if err != nil {
return err
}
if err := conn.WriteMessage(messageType, messageBytes); err != nil {
return err
}
var response messages.DeployQueryResponse
var done bool
for !done {
_, replyBytes, err := conn.ReadMessage()
if err != nil {
return err
}
message, err := messages.Unmarshal(replyBytes)
if err != nil {
return err
}
switch message := message.(type) {
case messages.DeployQueryResponse:
response = message
done = true
case servermsg.Queueing:
// don't print
case servermsg.Error:
return errors.New(message.Message)
case messages.DeployClientVersionNotSupported:
return errors.New("this client version is not supported - try `go get -u github.com/dave/wasmgo`")
default:
// unexpected
fmt.Fprintf(debug, "Unexpected message from server: %#v\n", message)
}
}
if len(response.Required) > 0 {
fmt.Fprintf(debug, "Files required: %d.\n", len(response.Required))
fmt.Fprintln(debug, "Bundling required files...")
var required []messages.DeployFile
for _, k := range response.Required {
file := files[k.Type]
if file.Hash == "" {
return errors.New("server requested file not found")
}
required = append(required, file)
}
payload := messages.DeployPayload{Files: required}
payloadBytes, payloadType, err := messages.Marshal(payload)
if err != nil {
return err
}
if err := conn.WriteMessage(payloadType, payloadBytes); err != nil {
return err
}
fmt.Fprintf(debug, "Sending payload: %dKB.\n", len(payloadBytes)/1024)
done = false
for !done {
_, replyBytes, err := conn.ReadMessage()
if err != nil {
return err
}
message, err := messages.Unmarshal(replyBytes)
if err != nil {
return err
}
switch message := message.(type) {
case messages.DeployDone:
done = true
case servermsg.Queueing:
// don't print
case servermsg.Error:
return errors.New(message.Message)
case constormsg.Storing:
if message.Remain > 0 || message.Finished > 0 {
fmt.Fprintf(debug, "Storing, %d to go.\n", message.Remain)
}
default:
// unexpected
fmt.Fprintf(debug, "Unexpected message from server: %#v\n", message)
}
}
fmt.Fprintln(debug, "Sending done.")
} else {
fmt.Fprintln(debug, "No files required.")
}
outputVars := struct {
Page string
Loader string
}{
Page: indexUrl,
Loader: loaderUrl,
}
if cfg.Json {
out, err := json.Marshal(outputVars)
if err != nil {
return err
}
fmt.Println(string(out))
} else {
tpl, err := template.New("main").Parse(cfg.Template)
if err != nil {
return err
}
if err := tpl.Execute(os.Stdout, outputVars); err != nil {
return err
}
fmt.Println("")
}
if cfg.Open {
browser.OpenURL(indexUrl)
}
return nil
}
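// runGoBuild compiles the package to WebAssembly by running the configured go
// command (default "go") with GOOS=js and GOARCH=wasm, writing the binary to
// fpath. Extra build flags and build tags from the config are passed through.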
func runGoBuild(cfg *cmdconfig.Config, fpath string, debug io.Writer) error {
args := []string{"build", "-o", fpath}
extraFlags := strings.Fields(cfg.Flags)
for _, f := range extraFlags {
args = append(args, f)
}
if cfg.BuildTags != "" {
args = append(args, "-tags", cfg.BuildTags)
}
path := "."
if cfg.Path != "" {
path = cfg.Path
}
args = append(args, path)
fmt.Fprintln(debug, "Compiling...")
cmd := exec.Command(cfg.Command, args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "GOARCH=wasm")
cmd.Env = append(cmd.Env, "GOOS=js")
output, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(string(output), "unsupported GOOS/GOARCH pair js/wasm") {
return errors.New("you need Go v1.11 to compile WASM. It looks like your default `go` command is not v1.11. Perhaps you need the -c flag to specify a custom command name - e.g. `-c=go1.11beta3`")
}
return fmt.Errorf("%v: %s", err, string(output))
}
if len(output) > 0 {
return fmt.Errorf("%s", string(output))
}
return nil
}
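// runGoList returns the source directory of the target package via
// `go list -f {{.Dir}}`, so that a custom index file can be located relative to
// the package source when a package path is given.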
func runGoList(cfg *cmdconfig.Config) (string, error) {
args := []string{"list"}
if cfg.BuildTags != "" {
args = append(args, "-tags", cfg.BuildTags)
}
args = append(args, "-f", "{{.Dir}}")
path := "."
if cfg.Path != "" {
path = cfg.Path
}
args = append(args, path)
cmd := exec.Command(cfg.Command, args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, "GOARCH=wasm")
cmd.Env = append(cmd.Env, "GOOS=js")
output, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(string(output), "unsupported GOOS/GOARCH pair js/wasm") {
return "", errors.New("you need Go v1.11 to compile WASM. It looks like your default `go` command is not v1.11. Perhaps you need the -c flag to specify a custom command name - e.g. `-c=go1.11beta3`")
}
return "", fmt.Errorf("%v: %s", err, string(output))
}
return strings.TrimSpace(string(output)), nil
}

View File

@ -1,24 +0,0 @@
package deployer
import "text/template"
var defaultIndexTemplate = template.Must(template.New("main").Parse(`<html>
<head><meta charset="utf-8"></head>
<body>
<script src="{{ .Script }}"></script>
<script src="{{ .Loader }}"></script>
</body>
</html>`))
var loaderTemplate = template.Must(template.New("main").Parse(`if (!WebAssembly.instantiateStreaming) {
WebAssembly.instantiateStreaming = async (resp, importObject) => {
const source = await (await resp).arrayBuffer();
return await WebAssembly.instantiate(source, importObject);
};
}
const go = new Go();
WebAssembly.instantiateStreaming(fetch("{{ .Binary }}"), go.importObject).then(result => {
go.run(result.instance);
});`))
var loaderTemplateMin = template.Must(template.New("main").Parse(`WebAssembly.instantiateStreaming||(WebAssembly.instantiateStreaming=(async(t,a)=>{const e=await(await t).arrayBuffer();return await WebAssembly.instantiate(e,a)}));const go=new Go;WebAssembly.instantiateStreaming(fetch("{{ .Binary }}"),go.importObject).then(t=>{go.run(t.instance)});`))

View File

@ -1,23 +0,0 @@
package cmd
import (
"fmt"
"os"
"github.com/dave/wasmgo/cmd/cmdconfig"
"github.com/spf13/cobra"
)
var global = &cmdconfig.Config{}
var rootCmd = &cobra.Command{
Use: "wasmgo",
Short: "Compile Go to WASM, test locally or deploy to jsgo.io",
}
func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}

View File

@ -1,18 +0,0 @@
package cmd
/*
func init() {
serveCmd.PersistentFlags().IntVarP(&global.Port, "port", "p", 8080, "Server port. If this is in use, an unused port is chosen.")
rootCmd.AddCommand(serveCmd)
}
var serveCmd = &cobra.Command{
Use: "serve [package]",
Short: "Serve locally",
Long: "Starts a webserver locally, and recompiles the WASM on every page refresh, for testing and development.",
Args: cobra.RangeArgs(0, 1),
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("Serve mode coming soon...")
},
}
*/

View File

@ -1,20 +0,0 @@
package cmd
import (
"fmt"
"github.com/dave/wasmgo/cmd/deployer"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(versionCmd)
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Show client version number",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(deployer.CLIENT_VERSION)
},
}

View File

@ -1,7 +0,0 @@
// +build dev
package config
const (
DEV = true
)

View File

@ -1,7 +0,0 @@
// +build !dev
package config
const (
DEV = false
)

View File

@ -1,7 +0,0 @@
package config
const (
Wasm = "wasm"
Index = "index"
Pkg = "pkg"
)

View File

@ -1,19 +0,0 @@
// +build dev,!local
package config
const (
LOCAL = false
)
var Host = map[string]string{
Wasm: "localhost:8083",
Pkg: "dev-pkg.jsgo.io",
Index: "dev-index.jsgo.io",
}
var Protocol = map[string]string{
Wasm: "http",
Pkg: "https",
Index: "https",
}

View File

@ -1,19 +0,0 @@
// +build local
package config
const (
LOCAL = true
)
var Host = map[string]string{
Wasm: "localhost:8083",
Pkg: "localhost:8092",
Index: "localhost:8093",
}
var Protocol = map[string]string{
Wasm: "http",
Pkg: "http",
Index: "http",
}

View File

@ -1,19 +0,0 @@
// +build !dev,!local
package config
const (
LOCAL = false
)
var Host = map[string]string{
Wasm: "wasm.jsgo.io",
Pkg: "pkg.jsgo.io",
Index: "jsgo.io",
}
var Protocol = map[string]string{
Wasm: "https",
Pkg: "https",
Index: "https",
}

View File

@ -1,7 +0,0 @@
package main
import "github.com/dave/wasmgo/cmd"
func main() {
cmd.Execute()
}

View File

@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

View File

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

View File

@ -1,31 +0,0 @@
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,253 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer deep copy and merge.
// TODO: RawMessage.
package proto
import (
"fmt"
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
func Clone(src Message) Message {
in := reflect.ValueOf(src)
if in.IsNil() {
return src
}
out := reflect.New(in.Type().Elem())
dst := out.Interface().(Message)
Merge(dst, src)
return dst
}
// Merger is the interface representing objects that can merge messages of the same type.
type Merger interface {
// Merge merges src into this message.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
//
// Merge may panic if called with a different argument type than the receiver.
Merge(src Message)
}
// generatedMerger is the custom merge method that generated protos will have.
// We must add this method since a generate Merge method will conflict with
// many existing protos that have a Merge data field already defined.
type generatedMerger interface {
XXX_Merge(src Message)
}
// Merge merges src into dst.
// Required and optional fields that are set in src will be set to that value in dst.
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
if m, ok := dst.(Merger); ok {
m.Merge(src)
return
}
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
}
if in.IsNil() {
return // Merge from nil src is a noop
}
if m, ok := dst.(generatedMerger); ok {
m.XXX_Merge(src)
return
}
mergeStruct(out.Elem(), in.Elem())
}
func mergeStruct(out, in reflect.Value) {
sprop := GetProperties(in.Type())
for i := 0; i < in.NumField(); i++ {
f := in.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
uf := in.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return
}
uin := uf.Bytes()
if len(uin) > 0 {
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
}
}
// mergeAny performs a merge between two values of the same type.
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
// prop is set if this is a struct field (it may be nil).
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
if in.Type() == protoMessageType {
if !in.IsNil() {
if out.IsNil() {
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
} else {
Merge(out.Interface().(Message), in.Interface().(Message))
}
}
return
}
switch in.Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
if !viaPtr && isProto3Zero(in) {
return
}
out.Set(in)
case reflect.Interface:
// Probably a oneof field; copy non-nil values.
if in.IsNil() {
return
}
// Allocate destination if it is not set, or set to a different type.
// Otherwise we will merge as normal.
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
}
mergeAny(out.Elem(), in.Elem(), false, nil)
case reflect.Map:
if in.Len() == 0 {
return
}
if out.IsNil() {
out.Set(reflect.MakeMap(in.Type()))
}
// For maps with value types of *T or []byte we need to deep copy each value.
elemKind := in.Type().Elem().Kind()
for _, key := range in.MapKeys() {
var val reflect.Value
switch elemKind {
case reflect.Ptr:
val = reflect.New(in.Type().Elem().Elem())
mergeAny(val, in.MapIndex(key), false, nil)
case reflect.Slice:
val = in.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
default:
val = in.MapIndex(key)
}
out.SetMapIndex(key, val)
}
case reflect.Ptr:
if in.IsNil() {
return
}
if out.IsNil() {
out.Set(reflect.New(in.Elem().Type()))
}
mergeAny(out.Elem(), in.Elem(), true, nil)
case reflect.Slice:
if in.IsNil() {
return
}
if in.Type().Elem().Kind() == reflect.Uint8 {
// []byte is a scalar bytes field, not a repeated field.
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value, and should not
// be merged.
if prop != nil && prop.proto3 && in.Len() == 0 {
return
}
// Make a deep copy.
// Append to []byte{} instead of []byte(nil) so that we never end up
// with a nil result.
out.SetBytes(append([]byte{}, in.Bytes()...))
return
}
n := in.Len()
if out.IsNil() {
out.Set(reflect.MakeSlice(in.Type(), 0, n))
}
switch in.Type().Elem().Kind() {
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
reflect.String, reflect.Uint32, reflect.Uint64:
out.Set(reflect.AppendSlice(out, in))
default:
for i := 0; i < n; i++ {
x := reflect.Indirect(reflect.New(in.Type().Elem()))
mergeAny(x, in.Index(i), false, nil)
out.Set(reflect.Append(out, x))
}
}
case reflect.Struct:
mergeStruct(out, in)
default:
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to copy %v", in)
}
}
func mergeExtension(out, in map[int32]Extension) {
for extNum, eIn := range in {
eOut := Extension{desc: eIn.desc}
if eIn.value != nil {
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
eOut.value = v.Interface()
}
if eIn.enc != nil {
eOut.enc = make([]byte, len(eIn.enc))
copy(eOut.enc, eIn.enc)
}
out[extNum] = eOut
}
}

View File

@ -1,428 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for decoding protocol buffer data to construct in-memory representations.
*/
import (
"errors"
"fmt"
"io"
)
// errOverflow is returned when an integer is too large to be represented.
var errOverflow = errors.New("proto: integer overflow")
// ErrInternalBadWireType is returned by generated code when an incorrect
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
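// For example, DecodeVarint([]byte{0xAC, 0x02}) returns (300, 2): 0xAC carries
// the low seven bits 0x2C (44) with the continuation bit set, and 0x02
// contributes 2<<7 = 256, giving 44+256 = 300 in two bytes.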
func DecodeVarint(buf []byte) (x uint64, n int) {
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
}
b := uint64(buf[n])
n++
x |= (b & 0x7F) << shift
if (b & 0x80) == 0 {
return x, n
}
}
// The number is too large to represent in a 64-bit value.
return 0, 0
}
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
i := p.index
l := len(p.buf)
for shift := uint(0); shift < 64; shift += 7 {
if i >= l {
err = io.ErrUnexpectedEOF
return
}
b := p.buf[i]
i++
x |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
p.index = i
return
}
}
// The number is too large to represent in a 64-bit value.
err = errOverflow
return
}
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) DecodeVarint() (x uint64, err error) {
i := p.index
buf := p.buf
if i >= len(buf) {
return 0, io.ErrUnexpectedEOF
} else if buf[i] < 0x80 {
p.index++
return uint64(buf[i]), nil
} else if len(buf)-i < 10 {
return p.decodeVarintSlow()
}
var b uint64
// we already checked the first byte
x = uint64(buf[i]) - 0x80
i++
b = uint64(buf[i])
i++
x += b << 7
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 7
b = uint64(buf[i])
i++
x += b << 14
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 14
b = uint64(buf[i])
i++
x += b << 21
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 21
b = uint64(buf[i])
i++
x += b << 28
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 28
b = uint64(buf[i])
i++
x += b << 35
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 35
b = uint64(buf[i])
i++
x += b << 42
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 42
b = uint64(buf[i])
i++
x += b << 49
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 49
b = uint64(buf[i])
i++
x += b << 56
if b&0x80 == 0 {
goto done
}
x -= 0x80 << 56
b = uint64(buf[i])
i++
x += b << 63
if b&0x80 == 0 {
goto done
}
// x -= 0x80 << 63 // Always zero.
return 0, errOverflow
done:
p.index = i
return x, nil
}
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
// x, err already 0
i := p.index + 8
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-8])
x |= uint64(p.buf[i-7]) << 8
x |= uint64(p.buf[i-6]) << 16
x |= uint64(p.buf[i-5]) << 24
x |= uint64(p.buf[i-4]) << 32
x |= uint64(p.buf[i-3]) << 40
x |= uint64(p.buf[i-2]) << 48
x |= uint64(p.buf[i-1]) << 56
return
}
// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
// x, err already 0
i := p.index + 4
if i < 0 || i > len(p.buf) {
err = io.ErrUnexpectedEOF
return
}
p.index = i
x = uint64(p.buf[i-4])
x |= uint64(p.buf[i-3]) << 8
x |= uint64(p.buf[i-2]) << 16
x |= uint64(p.buf[i-1]) << 24
return
}
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
return
}
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
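// Zigzag encoding keeps small magnitudes small: signed 0, -1, 1, -2, 2 are
// encoded as 0, 1, 2, 3, 4 respectively, so an encoded value of 3 decodes to -2
// once the returned bits are reinterpreted as an int32.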
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
x, err = p.DecodeVarint()
if err != nil {
return
}
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
return
}
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
n, err := p.DecodeVarint()
if err != nil {
return nil, err
}
nb := int(n)
if nb < 0 {
return nil, fmt.Errorf("proto: bad byte length %d", nb)
}
end := p.index + nb
if end < p.index || end > len(p.buf) {
return nil, io.ErrUnexpectedEOF
}
if !alloc {
// todo: check if can get more uses of alloc=false
buf = p.buf[p.index:end]
p.index += nb
return
}
buf = make([]byte, nb)
copy(buf, p.buf[p.index:])
p.index += nb
return
}
// DecodeStringBytes reads an encoded string from the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) DecodeStringBytes() (s string, err error) {
buf, err := p.DecodeRawBytes(false)
if err != nil {
return
}
return string(buf), nil
}
// Unmarshaler is the interface representing objects that can
// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
// Unmarshal implementations should not clear the receiver.
// Any unmarshaled data should be merged into the receiver.
// Callers of Unmarshal that do not want to retain existing data
// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
Unmarshal([]byte) error
}
// newUnmarshaler is the interface representing objects that can
// unmarshal themselves. The semantics are identical to Unmarshaler.
//
// This exists to support protoc-gen-go generated messages.
// The proto package will stop type-asserting to this interface in the future.
//
// DO NOT DEPEND ON THIS.
type newUnmarshaler interface {
XXX_Unmarshal([]byte) error
}
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// Unmarshal resets pb before starting to unmarshal, so any
// existing data in pb is always removed. Use UnmarshalMerge
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
if u, ok := pb.(newUnmarshaler); ok {
return u.XXX_Unmarshal(buf)
}
if u, ok := pb.(Unmarshaler); ok {
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
// writes the decoded result to pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
//
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
if u, ok := pb.(newUnmarshaler); ok {
return u.XXX_Unmarshal(buf)
}
if u, ok := pb.(Unmarshaler); ok {
// NOTE: The history of proto has unfortunately been inconsistent about
// whether Unmarshaler should or should not implicitly clear itself.
// Some implementations do, most do not.
// Thus, calling this here may or may not do what people want.
//
// See https://github.com/golang/protobuf/issues/424
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
}
// DecodeMessage reads a count-delimited message from the Buffer.
func (p *Buffer) DecodeMessage(pb Message) error {
enc, err := p.DecodeRawBytes(false)
if err != nil {
return err
}
return NewBuffer(enc).Unmarshal(pb)
}
// DecodeGroup reads a tag-delimited group from the Buffer.
// StartGroup tag is already consumed. This function consumes
// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
b := p.buf[p.index:]
x, y := findEndGroup(b)
if x < 0 {
return io.ErrUnexpectedEOF
}
err := Unmarshal(b[:x], pb)
p.index += y
return err
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
//
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
if u, ok := pb.(newUnmarshaler); ok {
err := u.XXX_Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
if u, ok := pb.(Unmarshaler); ok {
// NOTE: The history of proto has unfortunately been inconsistent about
// whether Unmarshaler should or should not implicitly clear itself.
// Some implementations do, most do not.
// Thus, calling this here may or may not do what people want.
//
// See https://github.com/golang/protobuf/issues/424
err := u.Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
// Slow workaround for messages that aren't Unmarshalers.
// This includes some hand-coded .pb.go files and
// bootstrap protos.
// TODO: fix all of those and then add Unmarshal to
// the Message interface. Then:
// The cast above and code below can be deleted.
// The old unmarshaler can be deleted.
// Clients can call Unmarshal directly (can already do that, actually).
var info InternalMessageInfo
err := info.Unmarshal(pb, p.buf[p.index:])
p.index = len(p.buf)
return err
}

View File

@ -1,350 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
type generatedDiscarder interface {
XXX_DiscardUnknown()
}
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
//
// For proto2 messages, the unknown fields of message extensions are only
// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
if m, ok := m.(generatedDiscarder); ok {
m.XXX_DiscardUnknown()
return
}
// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
// but the master branch has no implementation for InternalMessageInfo,
// so it would be more work to replicate that approach.
discardLegacy(m)
}
// DiscardUnknown recursively discards all unknown fields.
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
di := atomicLoadDiscardInfo(&a.discard)
if di == nil {
di = getDiscardInfo(reflect.TypeOf(m).Elem())
atomicStoreDiscardInfo(&a.discard, di)
}
di.discard(toPointer(&m))
}
type discardInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []discardFieldInfo
unrecognized field
}
type discardFieldInfo struct {
field field // Offset of field, guaranteed to be valid
discard func(src pointer)
}
var (
discardInfoMap = map[reflect.Type]*discardInfo{}
discardInfoLock sync.Mutex
)
func getDiscardInfo(t reflect.Type) *discardInfo {
discardInfoLock.Lock()
defer discardInfoLock.Unlock()
di := discardInfoMap[t]
if di == nil {
di = &discardInfo{typ: t}
discardInfoMap[t] = di
}
return di
}
func (di *discardInfo) discard(src pointer) {
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&di.initialized) == 0 {
di.computeDiscardInfo()
}
for _, fi := range di.fields {
sfp := src.offset(fi.field)
fi.discard(sfp)
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
// Ignore lock since DiscardUnknown is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
DiscardUnknown(m)
}
}
}
if di.unrecognized.IsValid() {
*src.offset(di.unrecognized).toBytes() = nil
}
}
func (di *discardInfo) computeDiscardInfo() {
di.lock.Lock()
defer di.lock.Unlock()
if di.initialized != 0 {
return
}
t := di.typ
n := t.NumField()
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
dfi := discardFieldInfo{field: toField(&f)}
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
case isSlice: // E.g., []*pb.T
di := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sps := src.getPointerSlice()
for _, sp := range sps {
if !sp.isNil() {
di.discard(sp)
}
}
}
default: // E.g., *pb.T
di := getDiscardInfo(tf)
dfi.discard = func(src pointer) {
sp := src.getPointer()
if !sp.isNil() {
di.discard(sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
default: // E.g., map[K]V
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
dfi.discard = func(src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
DiscardUnknown(val.Interface().(Message))
}
}
} else {
dfi.discard = func(pointer) {} // Noop
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
default: // E.g., interface{}
// TODO: Make this faster?
dfi.discard = func(src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
DiscardUnknown(sv.Interface().(Message))
}
}
}
}
default:
continue
}
di.fields = append(di.fields, dfi)
}
di.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
di.unrecognized = toField(&f)
}
atomic.StoreInt32(&di.initialized, 1)
}
func discardLegacy(m Message) {
v := reflect.ValueOf(m)
if v.Kind() != reflect.Ptr || v.IsNil() {
return
}
v = v.Elem()
if v.Kind() != reflect.Struct {
return
}
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
vf := v.Field(i)
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
case isSlice: // E.g., []*pb.T
for j := 0; j < vf.Len(); j++ {
discardLegacy(vf.Index(j).Interface().(Message))
}
default: // E.g., *pb.T
discardLegacy(vf.Interface().(Message))
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
default: // E.g., map[K]V
tv := vf.Type().Elem()
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
for _, key := range vf.MapKeys() {
val := vf.MapIndex(key)
discardLegacy(val.Interface().(Message))
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
default: // E.g., test_proto.isCommunique_Union interface
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
if !vf.IsNil() {
vf = vf.Elem() // E.g., test_proto.Communique_Msg
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
if vf.Kind() == reflect.Ptr {
discardLegacy(vf.Interface().(Message))
}
}
}
}
}
}
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
if vf.Type() != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
vf.Set(reflect.ValueOf([]byte(nil)))
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, err := extendable(m); err == nil {
// Ignore lock since discardLegacy is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
discardLegacy(m)
}
}
}
}

View File

@ -1,221 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"errors"
"fmt"
"reflect"
)
// RequiredNotSetError is the error returned if Marshal is called with
// a protocol buffer struct whose required fields have not
// all been initialized. It is also the error returned if Unmarshal is
// called with an encoded protocol buffer that does not include all the
// required fields.
//
// When printed, RequiredNotSetError reports the first unset required field in a
// message. If the field cannot be precisely determined, it is reported as
// "{Unknown}".
type RequiredNotSetError struct {
field string
}
func (e *RequiredNotSetError) Error() string {
return fmt.Sprintf("proto: required field %q not set", e.field)
}
var (
// errRepeatedHasNil is the error returned if Marshal is called with
// a struct with a repeated field containing a nil element.
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
// errOneofHasNil is the error returned if Marshal is called with
// a struct with a oneof field containing a nil element.
errOneofHasNil = errors.New("proto: oneof field has nil value")
// ErrNil is the error returned if Marshal is called with nil.
ErrNil = errors.New("proto: Marshal called with nil")
// ErrTooLarge is the error returned if Marshal is called with a
// message that encodes to >2GB.
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
)
// The fundamental encoders that put bytes on the wire.
// Those that take integer types all accept uint64 and are
// therefore of type valueEncoder.
const maxVarintBytes = 10 // maximum length of a varint
// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
// Not used by the package itself, but helpful to clients
// wishing to use the same encoding.
func EncodeVarint(x uint64) []byte {
var buf [maxVarintBytes]byte
var n int
for n = 0; x > 127; n++ {
buf[n] = 0x80 | uint8(x&0x7F)
x >>= 7
}
buf[n] = uint8(x)
n++
return buf[0:n]
}
// EncodeVarint writes a varint-encoded integer to the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) EncodeVarint(x uint64) error {
for x >= 1<<7 {
p.buf = append(p.buf, uint8(x&0x7f|0x80))
x >>= 7
}
p.buf = append(p.buf, uint8(x))
return nil
}
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
switch {
case x < 1<<7:
return 1
case x < 1<<14:
return 2
case x < 1<<21:
return 3
case x < 1<<28:
return 4
case x < 1<<35:
return 5
case x < 1<<42:
return 6
case x < 1<<49:
return 7
case x < 1<<56:
return 8
case x < 1<<63:
return 9
}
return 10
}
// EncodeFixed64 writes a 64-bit integer to the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) EncodeFixed64(x uint64) error {
p.buf = append(p.buf,
uint8(x),
uint8(x>>8),
uint8(x>>16),
uint8(x>>24),
uint8(x>>32),
uint8(x>>40),
uint8(x>>48),
uint8(x>>56))
return nil
}
// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) EncodeFixed32(x uint64) error {
p.buf = append(p.buf,
uint8(x),
uint8(x>>8),
uint8(x>>16),
uint8(x>>24))
return nil
}
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) EncodeRawBytes(b []byte) error {
p.EncodeVarint(uint64(len(b)))
p.buf = append(p.buf, b...)
return nil
}
// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
p.EncodeVarint(uint64(len(s)))
p.buf = append(p.buf, s...)
return nil
}
// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
Marshal() ([]byte, error)
}
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
siz := Size(pb)
p.EncodeVarint(uint64(siz))
return p.Marshal(pb)
}
// All protocol buffer fields are nillable, but be careful.
func isNil(v reflect.Value) bool {
switch v.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return v.IsNil()
}
return false
}

View File

@ -1,300 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Protocol buffer comparison.
package proto
import (
"bytes"
"log"
"reflect"
"strings"
)
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.
Equality is defined in this way:
- Two messages are equal iff they are the same type,
corresponding fields are equal, unknown field sets
are equal, and extensions sets are equal.
- Two set scalar fields are equal iff their values are equal.
If the fields are of a floating-point type, remember that
NaN != x for all x, including NaN. If the message is defined
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
and their corresponding elements are equal. Note a "bytes" field,
although represented by []byte, is not a repeated field and the
rule for the scalar fields described above applies.
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
- Two map fields are equal iff their lengths are the same,
and they contain the same set of elements. Zero-length map
fields are equal.
- Every other combination of things is not equal.
The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
if a == nil || b == nil {
return a == b
}
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
if v1.Type() != v2.Type() {
return false
}
if v1.Kind() == reflect.Ptr {
if v1.IsNil() {
return v2.IsNil()
}
if v2.IsNil() {
return false
}
v1, v2 = v1.Elem(), v2.Elem()
}
if v1.Kind() != reflect.Struct {
return false
}
return equalStruct(v1, v2)
}
// v1 and v2 are known to have the same type.
func equalStruct(v1, v2 reflect.Value) bool {
sprop := GetProperties(v1.Type())
for i := 0; i < v1.NumField(); i++ {
f := v1.Type().Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
f1, f2 := v1.Field(i), v2.Field(i)
if f.Type.Kind() == reflect.Ptr {
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
// both unset
continue
} else if n1 != n2 {
// set/unset mismatch
return false
}
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
return false
}
}
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_InternalExtensions")
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
return false
}
}
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
em2 := v2.FieldByName("XXX_extensions")
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
return false
}
}
uf := v1.FieldByName("XXX_unrecognized")
if !uf.IsValid() {
return true
}
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
return bytes.Equal(u1, u2)
}
// v1 and v2 are known to have the same type.
// prop may be nil.
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
if v1.Type() == protoMessageType {
m1, _ := v1.Interface().(Message)
m2, _ := v2.Interface().(Message)
return Equal(m1, m2)
}
switch v1.Kind() {
case reflect.Bool:
return v1.Bool() == v2.Bool()
case reflect.Float32, reflect.Float64:
return v1.Float() == v2.Float()
case reflect.Int32, reflect.Int64:
return v1.Int() == v2.Int()
case reflect.Interface:
// Probably a oneof field; compare the inner values.
n1, n2 := v1.IsNil(), v2.IsNil()
if n1 || n2 {
return n1 == n2
}
e1, e2 := v1.Elem(), v2.Elem()
if e1.Type() != e2.Type() {
return false
}
return equalAny(e1, e2, nil)
case reflect.Map:
if v1.Len() != v2.Len() {
return false
}
for _, key := range v1.MapKeys() {
val2 := v2.MapIndex(key)
if !val2.IsValid() {
// This key was not found in the second map.
return false
}
if !equalAny(v1.MapIndex(key), val2, nil) {
return false
}
}
return true
case reflect.Ptr:
// Maps may have nil values in them, so check for nil.
if v1.IsNil() && v2.IsNil() {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return equalAny(v1.Elem(), v2.Elem(), prop)
case reflect.Slice:
if v1.Type().Elem().Kind() == reflect.Uint8 {
// short circuit: []byte
// Edge case: if this is in a proto3 message, a zero length
// bytes field is considered the zero value.
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
return true
}
if v1.IsNil() != v2.IsNil() {
return false
}
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
}
if v1.Len() != v2.Len() {
return false
}
for i := 0; i < v1.Len(); i++ {
if !equalAny(v1.Index(i), v2.Index(i), prop) {
return false
}
}
return true
case reflect.String:
return v1.Interface().(string) == v2.Interface().(string)
case reflect.Struct:
return equalStruct(v1, v2)
case reflect.Uint32, reflect.Uint64:
return v1.Uint() == v2.Uint()
}
// unknown type, so not a protocol buffer
log.Printf("proto: don't know how to compare %v", v1)
return false
}
// base is the struct type that the extensions are based on.
// x1 and x2 are InternalExtensions.
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
em1, _ := x1.extensionsRead()
em2, _ := x2.extensionsRead()
return equalExtMap(base, em1, em2)
}
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
if len(em1) != len(em2) {
return false
}
for extNum, e1 := range em1 {
e2, ok := em2[extNum]
if !ok {
return false
}
m1, m2 := e1.value, e2.value
if m1 == nil && m2 == nil {
// Both have only encoded form.
if bytes.Equal(e1.enc, e2.enc) {
continue
}
// The bytes are different, but the extensions might still be
// equal. We need to decode them to compare.
}
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
continue
}
// At least one is encoded. To do a semantically correct comparison
// we need to unmarshal them first.
var desc *ExtensionDesc
if m := extensionMaps[base]; m != nil {
desc = m[extNum]
}
if desc == nil {
// If both have only encoded form and the bytes are the same,
// it is handled above. We get here when the bytes are different.
// We don't know how to decode it, so just compare them as byte
// slices.
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
return false
}
var err error
if m1 == nil {
m1, err = decodeExtension(e1.enc, desc)
}
if m2 == nil && err == nil {
m2, err = decodeExtension(e2.enc, desc)
}
if err != nil {
// The encoded form is invalid.
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
return false
}
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
return false
}
}
return true
}
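The helpers above back the exported proto.Equal entry point defined earlier in this file. A minimal usage sketch, assuming the generated pb.Test message shown in the package documentation further below (the import path and field values are illustrative only, not part of this repository):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical generated package, as in the lib.go docs below
    )

    func main() {
        a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
        b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
        c := &pb.Test{Label: proto.String("bye")}

        fmt.Println(proto.Equal(a, b)) // true: fields are compared recursively via equalStruct/equalAny
        fmt.Println(proto.Equal(a, c)) // false: Label differs
    }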

View File

@ -1,543 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Types and routines for supporting protocol buffer extensions.
*/
import (
"errors"
"fmt"
"io"
"reflect"
"strconv"
"sync"
)
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
var ErrMissingExtension = errors.New("proto: missing extension")
// ExtensionRange represents a range of message extensions for a protocol buffer.
// Used in code generated by the protocol compiler.
type ExtensionRange struct {
Start, End int32 // both inclusive
}
// extendableProto is an interface implemented by any protocol buffer generated by the current
// proto compiler that may be extended.
type extendableProto interface {
Message
ExtensionRangeArray() []ExtensionRange
extensionsWrite() map[int32]Extension
extensionsRead() (map[int32]Extension, sync.Locker)
}
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
// version of the proto compiler that may be extended.
type extendableProtoV1 interface {
Message
ExtensionRangeArray() []ExtensionRange
ExtensionMap() map[int32]Extension
}
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
type extensionAdapter struct {
extendableProtoV1
}
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
return e.ExtensionMap()
}
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
return e.ExtensionMap(), notLocker{}
}
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
type notLocker struct{}
func (n notLocker) Lock() {}
func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
func extendable(p interface{}) (extendableProto, error) {
switch p := p.(type) {
case extendableProto:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return p, nil
case extendableProtoV1:
if isNilPtr(p) {
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
}
return extensionAdapter{p}, nil
}
// Don't allocate a specific error containing %T:
// this is the hot path for Clone and MarshalText.
return nil, errNotExtendable
}
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
func isNilPtr(x interface{}) bool {
v := reflect.ValueOf(x)
return v.Kind() == reflect.Ptr && v.IsNil()
}
// XXX_InternalExtensions is an internal representation of proto extensions.
//
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
//
// The methods of XXX_InternalExtensions are not concurrency safe in general,
// but calls to logically read-only methods such as has and get may be executed concurrently.
type XXX_InternalExtensions struct {
// The struct must be indirect so that if a user inadvertently copies a
// generated message and its embedded XXX_InternalExtensions, they
// avoid the mayhem of a copied mutex.
//
// The mutex serializes all logically read-only operations to p.extensionMap.
// It is up to the client to ensure that write operations to p.extensionMap are
// mutually exclusive with other accesses.
p *struct {
mu sync.Mutex
extensionMap map[int32]Extension
}
}
// extensionsWrite returns the extension map, creating it on first use.
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
if e.p == nil {
e.p = new(struct {
mu sync.Mutex
extensionMap map[int32]Extension
})
e.p.extensionMap = make(map[int32]Extension)
}
return e.p.extensionMap
}
// extensionsRead returns the extensions map for read-only use. It may be nil.
// The caller must hold the returned mutex's lock when accessing elements within the map.
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
if e.p == nil {
return nil, nil
}
return e.p.extensionMap, &e.p.mu
}
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
ExtendedType Message // nil pointer to the type that is being extended
ExtensionType interface{} // nil pointer to the extension type
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
Filename string // name of the file in which the extension is defined
}
func (ed *ExtensionDesc) repeated() bool {
t := reflect.TypeOf(ed.ExtensionType)
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}
// Extension represents an extension in a message.
type Extension struct {
// When an extension is stored in a message using SetExtension
// only desc and value are set. When the message is marshaled
// enc will be set to the encoded form of the message.
//
// When a message is unmarshaled and contains extensions, each
// extension will have only enc set. When such an extension is
// accessed using GetExtension (or GetExtensions) desc and value
// will be set.
desc *ExtensionDesc
value interface{}
enc []byte
}
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
epb, err := extendable(base)
if err != nil {
return
}
extmap := epb.extensionsWrite()
extmap[id] = Extension{enc: b}
}
// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
for _, er := range pb.ExtensionRangeArray() {
if er.Start <= field && field <= er.End {
return true
}
}
return false
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
var pbi interface{} = pb
// Check the extended type.
if ea, ok := pbi.(extensionAdapter); ok {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
return errors.New("proto: bad extension number; not in declared ranges")
}
return nil
}
// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
base reflect.Type
field int32
}
var extProp = struct {
sync.RWMutex
m map[extPropKey]*Properties
}{
m: make(map[extPropKey]*Properties),
}
func extensionProperties(ed *ExtensionDesc) *Properties {
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
extProp.RLock()
if prop, ok := extProp.m[key]; ok {
extProp.RUnlock()
return prop
}
extProp.RUnlock()
extProp.Lock()
defer extProp.Unlock()
// Check again.
if prop, ok := extProp.m[key]; ok {
return prop
}
prop := new(Properties)
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
extProp.m[key] = prop
return prop
}
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
epb, err := extendable(pb)
if err != nil {
return false
}
extmap, mu := epb.extensionsRead()
if extmap == nil {
return false
}
mu.Lock()
_, ok := extmap[extension.Field]
mu.Unlock()
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
epb, err := extendable(pb)
if err != nil {
return
}
// TODO: Check types, field numbers, etc.?
extmap := epb.extensionsWrite()
delete(extmap, extension.Field)
}
// GetExtension retrieves a proto2 extended field from pb.
//
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
// then GetExtension parses the encoded field and returns a Go value of the specified type.
// If the field is not present, then the default value is returned (if one is specified),
// otherwise ErrMissingExtension is reported.
//
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
if extension.ExtendedType != nil {
// can only check type if this is a complete descriptor
if err := checkExtensionTypes(epb, extension); err != nil {
return nil, err
}
}
emap, mu := epb.extensionsRead()
if emap == nil {
return defaultExtensionValue(extension)
}
mu.Lock()
defer mu.Unlock()
e, ok := emap[extension.Field]
if !ok {
// defaultExtensionValue returns the default value or
// ErrMissingExtension if there is no default.
return defaultExtensionValue(extension)
}
if e.value != nil {
// Already decoded. Check the descriptor, though.
if e.desc != extension {
// This shouldn't happen. If it does, it means that
// GetExtension was called twice with two different
// descriptors with the same field number.
return nil, errors.New("proto: descriptor conflict")
}
return e.value, nil
}
if extension.ExtensionType == nil {
// incomplete descriptor
return e.enc, nil
}
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
}
// Remember the decoded version and drop the encoded version.
// That way it is safe to mutate what we return.
e.value = v
e.desc = extension
e.enc = nil
emap[extension.Field] = e
return e.value, nil
}
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
if extension.ExtensionType == nil {
// incomplete descriptor, so no default
return nil, ErrMissingExtension
}
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
sf, _, err := fieldDefault(t, props)
if err != nil {
return nil, err
}
if sf == nil || sf.value == nil {
// There is no default value.
return nil, ErrMissingExtension
}
if t.Kind() != reflect.Ptr {
// We do not need to return a Ptr, we can directly return sf.value.
return sf.value, nil
}
// We need to return an interface{} that is a pointer to sf.value.
value := reflect.New(t).Elem()
value.Set(reflect.New(value.Type().Elem()))
if sf.kind == reflect.Int32 {
// We may have an int32 or an enum, but the underlying data is int32.
// Since we can't set an int32 into a non int32 reflect.value directly
// set it as a int32.
value.Elem().SetInt(int64(sf.value.(int32)))
} else {
value.Elem().Set(reflect.ValueOf(sf.value))
}
return value.Interface(), nil
}
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
t := reflect.TypeOf(extension.ExtensionType)
unmarshal := typeUnmarshaler(t, extension.Tag)
// t is a pointer to a struct, pointer to basic type or a slice.
// Allocate space to store the pointer/slice.
value := reflect.New(t).Elem()
var err error
for {
x, n := decodeVarint(b)
if n == 0 {
return nil, io.ErrUnexpectedEOF
}
b = b[n:]
wire := int(x) & 7
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
if err != nil {
return nil, err
}
if len(b) == 0 {
break
}
}
return value.Interface(), nil
}
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
extensions = make([]interface{}, len(es))
for i, e := range es {
extensions[i], err = GetExtension(epb, e)
if err == ErrMissingExtension {
err = nil
}
if err != nil {
return
}
}
return
}
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
epb, err := extendable(pb)
if err != nil {
return nil, err
}
registeredExtensions := RegisteredExtensions(pb)
emap, mu := epb.extensionsRead()
if emap == nil {
return nil, nil
}
mu.Lock()
defer mu.Unlock()
extensions := make([]*ExtensionDesc, 0, len(emap))
for extid, e := range emap {
desc := e.desc
if desc == nil {
desc = registeredExtensions[extid]
if desc == nil {
desc = &ExtensionDesc{Field: extid}
}
}
extensions = append(extensions, desc)
}
return extensions, nil
}
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
epb, err := extendable(pb)
if err != nil {
return err
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
}
typ := reflect.TypeOf(extension.ExtensionType)
if typ != reflect.TypeOf(value) {
return errors.New("proto: bad extension value type")
}
// nil extension values need to be caught early, because the
// encoder can't distinguish an ErrNil due to a nil extension
// from an ErrNil due to a missing field. Extensions are
// always optional, so the encoder would just swallow the error
// and drop all the extensions from the encoded message.
if reflect.ValueOf(value).IsNil() {
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
}
extmap := epb.extensionsWrite()
extmap[extension.Field] = Extension{desc: extension, value: value}
return nil
}
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
epb, err := extendable(pb)
if err != nil {
return
}
m := epb.extensionsWrite()
for k := range m {
delete(m, k)
}
}
// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
st := reflect.TypeOf(desc.ExtendedType).Elem()
m := extensionMaps[st]
if m == nil {
m = make(map[int32]*ExtensionDesc)
extensionMaps[st] = m
}
if _, ok := m[desc.Field]; ok {
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
}
m[desc.Field] = desc
}
// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
return extensionMaps[reflect.TypeOf(pb).Elem()]
}
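Taken together, the functions above form the public proto2 extension API. A minimal sketch, assuming a hypothetical generated package pb that extends MyMessage with an optional string field and therefore exports a descriptor pb.E_Note of type *proto.ExtensionDesc (all of these names are assumptions, not part of this repository):

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical generated package
    )

    func main() {
        msg := &pb.MyMessage{}

        // SetExtension stores desc+value; the encoded form is produced at marshal time.
        if err := proto.SetExtension(msg, pb.E_Note, proto.String("hi")); err != nil {
            log.Fatal(err)
        }

        if proto.HasExtension(msg, pb.E_Note) {
            v, err := proto.GetExtension(msg, pb.E_Note)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(*v.(*string)) // "hi"
        }

        proto.ClearExtension(msg, pb.E_Note)
    }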

View File

@ -1,921 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package proto converts data structures to and from the wire format of
protocol buffers. It works in concert with the Go source code generated
for .proto files by the protocol compiler.
A summary of the properties of the protocol buffer interface
for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
them as structure fields.
- There are getters that return a field's value if set,
and return the field's default value if unset.
The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
have them. They have the form Default_StructName_FieldName.
Because the getter methods handle defaulted values,
direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
Enum values are prefixed by the enclosing message's name, or by the
enum's type name if it is a top-level enum. Enum types have a String
method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
the surrounding message type.
- Extensions are given descriptor names that start with E_,
followed by an underscore-delimited list of the nested messages
that contain it (if any) followed by the CamelCased name of the
extension field itself. HasExtension, ClearExtension, GetExtension
and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
Given file test.proto, containing
package example;
enum FOO { X = 17; }
message Test {
required string label = 1;
optional int32 type = 2 [default=77];
repeated int64 reps = 3;
optional group OptionalGroup = 4 {
required string RequiredField = 5;
}
oneof union {
int32 number = 6;
string name = 7;
}
}
The resulting file, test.pb.go, is:
package example
import proto "github.com/golang/protobuf/proto"
import math "math"
type FOO int32
const (
FOO_X FOO = 17
)
var FOO_name = map[int32]string{
17: "X",
}
var FOO_value = map[string]int32{
"X": 17,
}
func (x FOO) Enum() *FOO {
p := new(FOO)
*p = x
return p
}
func (x FOO) String() string {
return proto.EnumName(FOO_name, int32(x))
}
func (x *FOO) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
if err != nil {
return err
}
*x = FOO(value)
return nil
}
type Test struct {
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
// Types that are valid to be assigned to Union:
// *Test_Number
// *Test_Name
Union isTest_Union `protobuf_oneof:"union"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Test) Reset() { *m = Test{} }
func (m *Test) String() string { return proto.CompactTextString(m) }
func (*Test) ProtoMessage() {}
type isTest_Union interface {
isTest_Union()
}
type Test_Number struct {
Number int32 `protobuf:"varint,6,opt,name=number"`
}
type Test_Name struct {
Name string `protobuf:"bytes,7,opt,name=name"`
}
func (*Test_Number) isTest_Union() {}
func (*Test_Name) isTest_Union() {}
func (m *Test) GetUnion() isTest_Union {
if m != nil {
return m.Union
}
return nil
}
const Default_Test_Type int32 = 77
func (m *Test) GetLabel() string {
if m != nil && m.Label != nil {
return *m.Label
}
return ""
}
func (m *Test) GetType() int32 {
if m != nil && m.Type != nil {
return *m.Type
}
return Default_Test_Type
}
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
if m != nil {
return m.Optionalgroup
}
return nil
}
type Test_OptionalGroup struct {
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
}
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
func (m *Test_OptionalGroup) GetRequiredField() string {
if m != nil && m.RequiredField != nil {
return *m.RequiredField
}
return ""
}
func (m *Test) GetNumber() int32 {
if x, ok := m.GetUnion().(*Test_Number); ok {
return x.Number
}
return 0
}
func (m *Test) GetName() string {
if x, ok := m.GetUnion().(*Test_Name); ok {
return x.Name
}
return ""
}
func init() {
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
}
To create and play with a Test object:
package main
import (
"log"
"github.com/golang/protobuf/proto"
pb "./example.pb"
)
func main() {
test := &pb.Test{
Label: proto.String("hello"),
Type: proto.Int32(17),
Reps: []int64{1, 2, 3},
Optionalgroup: &pb.Test_OptionalGroup{
RequiredField: proto.String("good bye"),
},
Union: &pb.Test_Name{"fred"},
}
data, err := proto.Marshal(test)
if err != nil {
log.Fatal("marshaling error: ", err)
}
newTest := &pb.Test{}
err = proto.Unmarshal(data, newTest)
if err != nil {
log.Fatal("unmarshaling error: ", err)
}
// Now test and newTest contain the same data.
if test.GetLabel() != newTest.GetLabel() {
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
}
// Use a type switch to determine which oneof was set.
switch u := test.Union.(type) {
case *pb.Test_Number: // u.Number contains the number.
case *pb.Test_Name: // u.Name contains the string.
}
// etc.
}
*/
package proto
import (
"encoding/json"
"errors"
"fmt"
"log"
"reflect"
"sort"
"strconv"
"sync"
)
var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
String() string
ProtoMessage()
}
// Stats records allocation details about the protocol buffer encoders
// and decoders. Useful for tuning the library itself.
type Stats struct {
Emalloc uint64 // mallocs in encode
Dmalloc uint64 // mallocs in decode
Encode uint64 // number of encodes
Decode uint64 // number of decodes
Chit uint64 // number of cache hits
Cmiss uint64 // number of cache misses
Size uint64 // number of sizes
}
// Set to true to enable stats collection.
const collectStats = false
var stats Stats
// GetStats returns a copy of the global Stats structure.
func GetStats() Stats { return stats }
// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
deterministic bool
}
// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
return &Buffer{buf: e}
}
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
func (p *Buffer) Reset() {
p.buf = p.buf[0:0] // for reading/writing
p.index = 0 // for reading
}
// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
p.buf = s
p.index = 0
}
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
// SetDeterministic sets whether to use deterministic serialization.
//
// Deterministic serialization guarantees that for a given binary, equal
// messages will always be serialized to the same bytes. This implies:
//
// - Repeated serialization of a message will return the same bytes.
// - Different processes of the same binary (which may be executing on
// different machines) will serialize equal messages to the same bytes.
//
// Note that the deterministic serialization is NOT canonical across
// languages. It is not guaranteed to remain stable over time. It is unstable
// across different builds with schema changes due to unknown fields.
// Users who need canonical serialization (e.g., persistent storage in a
// canonical form, fingerprinting, etc.) should define their own
// canonicalization specification and implement their own serializer rather
// than relying on this API.
//
// If deterministic serialization is requested, map entries will be sorted
// by keys in lexicographical order. This is an implementation detail and
// subject to change.
func (p *Buffer) SetDeterministic(deterministic bool) {
p.deterministic = deterministic
}
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
return &v
}
// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
return &v
}
// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
p := new(int32)
*p = int32(v)
return p
}
// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
return &v
}
// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
return &v
}
// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
return &v
}
// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
return &v
}
// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
return &v
}
// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
return &v
}
// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
s, ok := m[v]
if ok {
return s
}
return strconv.Itoa(int(v))
}
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
if data[0] == '"' {
// New style: enums are strings.
var repr string
if err := json.Unmarshal(data, &repr); err != nil {
return -1, err
}
val, ok := m[repr]
if !ok {
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
}
return val, nil
}
// Old style: enums are ints.
var val int32
if err := json.Unmarshal(data, &val); err != nil {
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
}
return val, nil
}
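// An illustrative sketch (not part of the original file): decoding the FOO enum
// from the package example above, which is accepted in either JSON representation.
//
//	v, err := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // symbolic form: v == 17
//	v, err = proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")   // numeric form: v == 17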
// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
var u uint64
obuf := p.buf
index := p.index
p.buf = b
p.index = 0
depth := 0
fmt.Printf("\n--- %s ---\n", s)
out:
for {
for i := 0; i < depth; i++ {
fmt.Print(" ")
}
index := p.index
if index == len(p.buf) {
break
}
op, err := p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: fetching op err %v\n", index, err)
break out
}
tag := op >> 3
wire := op & 7
switch wire {
default:
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
index, tag, wire)
break out
case WireBytes:
var r []byte
r, err = p.DecodeRawBytes(false)
if err != nil {
break out
}
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
if len(r) <= 6 {
for i := 0; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
} else {
for i := 0; i < 3; i++ {
fmt.Printf(" %.2x", r[i])
}
fmt.Printf(" ..")
for i := len(r) - 3; i < len(r); i++ {
fmt.Printf(" %.2x", r[i])
}
}
fmt.Printf("\n")
case WireFixed32:
u, err = p.DecodeFixed32()
if err != nil {
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
case WireFixed64:
u, err = p.DecodeFixed64()
if err != nil {
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
case WireVarint:
u, err = p.DecodeVarint()
if err != nil {
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
break out
}
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
case WireStartGroup:
fmt.Printf("%3d: t=%3d start\n", index, tag)
depth++
case WireEndGroup:
depth--
fmt.Printf("%3d: t=%3d end\n", index, tag)
}
}
if depth != 0 {
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
}
fmt.Printf("\n")
p.buf = obuf
p.index = index
}
// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
setDefaults(reflect.ValueOf(pb), true, false)
}
// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
v = v.Elem()
defaultMu.RLock()
dm, ok := defaults[v.Type()]
defaultMu.RUnlock()
if !ok {
dm = buildDefaultMessage(v.Type())
defaultMu.Lock()
defaults[v.Type()] = dm
defaultMu.Unlock()
}
for _, sf := range dm.scalars {
f := v.Field(sf.index)
if !f.IsNil() {
// field already set
continue
}
dv := sf.value
if dv == nil && !zeros {
// no explicit default, and don't want to set zeros
continue
}
fptr := f.Addr().Interface() // **T
// TODO: Consider batching the allocations we do here.
switch sf.kind {
case reflect.Bool:
b := new(bool)
if dv != nil {
*b = dv.(bool)
}
*(fptr.(**bool)) = b
case reflect.Float32:
f := new(float32)
if dv != nil {
*f = dv.(float32)
}
*(fptr.(**float32)) = f
case reflect.Float64:
f := new(float64)
if dv != nil {
*f = dv.(float64)
}
*(fptr.(**float64)) = f
case reflect.Int32:
// might be an enum
if ft := f.Type(); ft != int32PtrType {
// enum
f.Set(reflect.New(ft.Elem()))
if dv != nil {
f.Elem().SetInt(int64(dv.(int32)))
}
} else {
// int32 field
i := new(int32)
if dv != nil {
*i = dv.(int32)
}
*(fptr.(**int32)) = i
}
case reflect.Int64:
i := new(int64)
if dv != nil {
*i = dv.(int64)
}
*(fptr.(**int64)) = i
case reflect.String:
s := new(string)
if dv != nil {
*s = dv.(string)
}
*(fptr.(**string)) = s
case reflect.Uint8:
// exceptional case: []byte
var b []byte
if dv != nil {
db := dv.([]byte)
b = make([]byte, len(db))
copy(b, db)
} else {
b = []byte{}
}
*(fptr.(*[]byte)) = b
case reflect.Uint32:
u := new(uint32)
if dv != nil {
*u = dv.(uint32)
}
*(fptr.(**uint32)) = u
case reflect.Uint64:
u := new(uint64)
if dv != nil {
*u = dv.(uint64)
}
*(fptr.(**uint64)) = u
default:
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
}
}
for _, ni := range dm.nested {
f := v.Field(ni)
// f is *T or []*T or map[T]*T
switch f.Kind() {
case reflect.Ptr:
if f.IsNil() {
continue
}
setDefaults(f, recur, zeros)
case reflect.Slice:
for i := 0; i < f.Len(); i++ {
e := f.Index(i)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
case reflect.Map:
for _, k := range f.MapKeys() {
e := f.MapIndex(k)
if e.IsNil() {
continue
}
setDefaults(e, recur, zeros)
}
}
}
}
var (
// defaults maps a protocol buffer struct type to a slice of the fields,
// with its scalar fields set to their proto-declared non-zero default values.
defaultMu sync.RWMutex
defaults = make(map[reflect.Type]defaultMessage)
int32PtrType = reflect.TypeOf((*int32)(nil))
)
// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
scalars []scalarField
nested []int // struct field index of nested messages
}
type scalarField struct {
index int // struct field index
kind reflect.Kind // element type (the T in *T or []T)
value interface{} // the proto-declared default value, or nil
}
// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
sprop := GetProperties(t)
for _, prop := range sprop.Prop {
fi, ok := sprop.decoderTags.get(prop.Tag)
if !ok {
// XXX_unrecognized
continue
}
ft := t.Field(fi).Type
sf, nested, err := fieldDefault(ft, prop)
switch {
case err != nil:
log.Print(err)
case nested:
dm.nested = append(dm.nested, fi)
case sf != nil:
sf.index = fi
dm.scalars = append(dm.scalars, *sf)
}
}
return dm
}
// fieldDefault returns the scalarField for field type ft.
// sf will be nil if the field can not have a default.
// nestedMessage will be true if this is a nested message.
// Note that sf.index is not set on return.
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
var canHaveDefault bool
switch ft.Kind() {
case reflect.Ptr:
if ft.Elem().Kind() == reflect.Struct {
nestedMessage = true
} else {
canHaveDefault = true // proto2 scalar field
}
case reflect.Slice:
switch ft.Elem().Kind() {
case reflect.Ptr:
nestedMessage = true // repeated message
case reflect.Uint8:
canHaveDefault = true // bytes field
}
case reflect.Map:
if ft.Elem().Kind() == reflect.Ptr {
nestedMessage = true // map with message values
}
}
if !canHaveDefault {
if nestedMessage {
return nil, true, nil
}
return nil, false, nil
}
// We now know that ft is a pointer or slice.
sf = &scalarField{kind: ft.Elem().Kind()}
// scalar fields without defaults
if !prop.HasDefault {
return sf, false, nil
}
// a scalar field: either *T or []byte
switch ft.Elem().Kind() {
case reflect.Bool:
x, err := strconv.ParseBool(prop.Default)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Float32:
x, err := strconv.ParseFloat(prop.Default, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
}
sf.value = float32(x)
case reflect.Float64:
x, err := strconv.ParseFloat(prop.Default, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.Int32:
x, err := strconv.ParseInt(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
}
sf.value = int32(x)
case reflect.Int64:
x, err := strconv.ParseInt(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
}
sf.value = x
case reflect.String:
sf.value = prop.Default
case reflect.Uint8:
// []byte (not *uint8)
sf.value = []byte(prop.Default)
case reflect.Uint32:
x, err := strconv.ParseUint(prop.Default, 10, 32)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
}
sf.value = uint32(x)
case reflect.Uint64:
x, err := strconv.ParseUint(prop.Default, 10, 64)
if err != nil {
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
}
sf.value = x
default:
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
}
return sf, false, nil
}
// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums.
func mapKeys(vs []reflect.Value) sort.Interface {
s := mapKeySorter{vs: vs}
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
if len(vs) == 0 {
return s
}
switch vs[0].Kind() {
case reflect.Int32, reflect.Int64:
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
case reflect.Bool:
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
case reflect.String:
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
default:
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
}
return s
}
type mapKeySorter struct {
vs []reflect.Value
less func(a, b reflect.Value) bool
}
func (s mapKeySorter) Len() int { return len(s.vs) }
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
func (s mapKeySorter) Less(i, j int) bool {
return s.less(s.vs[i], s.vs[j])
}
// isProto3Zero reports whether v is a zero proto3 value.
func isProto3Zero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return !v.Bool()
case reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint32, reflect.Uint64:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.String:
return v.String() == ""
}
return false
}
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true
// InternalMessageInfo is a type used internally by generated .pb.go files.
// This type is not intended to be used by non-generated code.
// This type is not subject to any compatibility guarantee.
type InternalMessageInfo struct {
marshal *marshalInfo
unmarshal *unmarshalInfo
merge *mergeInfo
discard *discardInfo
}
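A short sketch tying the pieces of this file together: the scalar helpers build a message, SetDefaults fills unset fields that declare defaults, and a reused Buffer with SetDeterministic marshals it (again assuming the generated pb.Test from the package documentation above; names are illustrative):

    package main

    import (
        "log"

        "github.com/golang/protobuf/proto"
        pb "./example.pb" // hypothetical generated package from the docs above
    )

    func main() {
        t := &pb.Test{
            Label: proto.String("hello"),
            Reps:  []int64{1, 2, 3},
        }
        proto.SetDefaults(t) // Type picks up its declared default (77); set fields are untouched

        buf := proto.NewBuffer(nil)
        buf.SetDeterministic(true) // map fields, if any, marshal in sorted key order
        if err := buf.Marshal(t); err != nil {
            log.Fatal("marshaling error: ", err)
        }

        got := &pb.Test{}
        if err := proto.Unmarshal(buf.Bytes(), got); err != nil {
            log.Fatal("unmarshaling error: ", err)
        }
    }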

View File

@ -1,314 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Support for message sets.
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"reflect"
"sort"
"sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
// The first two types (_MessageSet_Item and messageSet)
// model what the protocol compiler produces for the following protocol message:
// message MessageSet {
// repeated group Item = 1 {
// required int32 type_id = 2;
// required string message = 3;
// };
// }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
type _MessageSet_Item struct {
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
Message []byte `protobuf:"bytes,3,req,name=message"`
}
type messageSet struct {
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
XXX_unrecognized []byte
// TODO: caching?
}
// Make sure messageSet is a Message.
var _ Message = (*messageSet)(nil)
// messageTypeIder is an interface satisfied by a protocol buffer type
// that may be stored in a MessageSet.
type messageTypeIder interface {
MessageTypeId() int32
}
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
mti, ok := pb.(messageTypeIder)
if !ok {
return nil
}
id := mti.MessageTypeId()
for _, item := range ms.Item {
if *item.TypeId == id {
return item
}
}
return nil
}
func (ms *messageSet) Has(pb Message) bool {
return ms.find(pb) != nil
}
func (ms *messageSet) Unmarshal(pb Message) error {
if item := ms.find(pb); item != nil {
return Unmarshal(item.Message, pb)
}
if _, ok := pb.(messageTypeIder); !ok {
return errNoMessageTypeID
}
return nil // TODO: return error instead?
}
func (ms *messageSet) Marshal(pb Message) error {
msg, err := Marshal(pb)
if err != nil {
return err
}
if item := ms.find(pb); item != nil {
// reuse existing item
item.Message = msg
return nil
}
mti, ok := pb.(messageTypeIder)
if !ok {
return errNoMessageTypeID
}
mtid := mti.MessageTypeId()
ms.Item = append(ms.Item, &_MessageSet_Item{
TypeId: &mtid,
Message: msg,
})
return nil
}
func (ms *messageSet) Reset() { *ms = messageSet{} }
func (ms *messageSet) String() string { return CompactTextString(ms) }
func (*messageSet) ProtoMessage() {}
// Support for the message_set_wire_format message option.
func skipVarint(buf []byte) []byte {
i := 0
for ; buf[i]&0x80 != 0; i++ {
}
return buf[i+1:]
}
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
return marshalMessageSet(exts, false)
}
// marshalMessageSet implements the above function, with an option to turn deterministic marshaling on or off.
func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
switch exts := exts.(type) {
case *XXX_InternalExtensions:
var u marshalInfo
siz := u.sizeMessageSet(exts)
b := make([]byte, 0, siz)
return u.appendMessageSet(b, exts, deterministic)
case map[int32]Extension:
// This is an old-style extension map.
// Wrap it in a new-style XXX_InternalExtensions.
ie := XXX_InternalExtensions{
p: &struct {
mu sync.Mutex
extensionMap map[int32]Extension
}{
extensionMap: exts,
},
}
var u marshalInfo
siz := u.sizeMessageSet(&ie)
b := make([]byte, 0, siz)
return u.appendMessageSet(b, &ie, deterministic)
default:
return nil, errors.New("proto: not an extension map")
}
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
m = exts.extensionsWrite()
case map[int32]Extension:
m = exts
default:
return errors.New("proto: not an extension map")
}
ms := new(messageSet)
if err := Unmarshal(buf, ms); err != nil {
return err
}
for _, item := range ms.Item {
id := *item.TypeId
msg := item.Message
// Restore wire type and field number varint, plus length varint.
// Be careful to preserve duplicate items.
b := EncodeVarint(uint64(id)<<3 | WireBytes)
if ext, ok := m[id]; ok {
// Existing data; rip off the tag and length varint
// so we join the new data correctly.
// We can assume that ext.enc is set because we are unmarshaling.
o := ext.enc[len(b):] // skip wire type and field number
_, n := DecodeVarint(o) // calculate length of length varint
o = o[n:] // skip length varint
msg = append(o, msg...) // join old data and new data
}
b = append(b, EncodeVarint(uint64(len(msg)))...)
b = append(b, msg...)
m[id] = Extension{enc: b}
}
return nil
}
// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
var mu sync.Locker
m, mu = exts.extensionsRead()
if m != nil {
// Keep the extensions map locked until we're done marshaling to prevent
// races between marshaling and unmarshaling the lazily-{en,de}coded
// values.
mu.Lock()
defer mu.Unlock()
}
case map[int32]Extension:
m = exts
default:
return nil, errors.New("proto: not an extension map")
}
var b bytes.Buffer
b.WriteByte('{')
// Process the map in key order for deterministic output.
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
for i, id := range ids {
ext := m[id]
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
if i > 0 && b.Len() > 1 {
b.WriteByte(',')
}
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
if x == nil {
x = reflect.New(msd.t.Elem()).Interface()
if err := Unmarshal(ext.enc, x.(Message)); err != nil {
return nil, err
}
}
d, err := json.Marshal(x)
if err != nil {
return nil, err
}
b.Write(d)
}
b.WriteByte('}')
return b.Bytes(), nil
}
// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
// Common-case fast path.
if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
return nil
}
// This is fairly tricky, and it's not clear that it is needed.
return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}
// A global registry of types that can be used in a MessageSet.
var messageSetMap = make(map[int32]messageSetDesc)
type messageSetDesc struct {
t reflect.Type // pointer to struct
name string
}
// RegisterMessageSetType is called from the generated code.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
messageSetMap[fieldNum] = messageSetDesc{
t: reflect.TypeOf(m),
name: name,
}
}
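For reference, the wire framing that UnmarshalMessageSet rebuilds above is the usual tag/length/value layout, and the exported varint helpers can reproduce it directly. A small sketch with a made-up field number and payload (both hypothetical):

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        id := int32(12345)                    // hypothetical extension field number
        msg := []byte{0x0a, 0x02, 0x68, 0x69} // hypothetical already-encoded message bytes

        // tag varint (field number << 3 | wire type), then length varint, then the bytes
        rec := proto.EncodeVarint(uint64(id)<<3 | proto.WireBytes)
        rec = append(rec, proto.EncodeVarint(uint64(len(msg)))...)
        rec = append(rec, msg...)

        tag, n := proto.DecodeVarint(rec)
        fmt.Println(tag>>3, tag&7, len(rec)-n) // 12345 2 5
    }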

View File

@ -1,357 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.
package proto
import (
"reflect"
"sync"
)
const unsafeAllowed = false
// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return f.Index
}
// invalidField is an invalid field identifier.
var invalidField = field(nil)
// zeroField is a noop when calling pointer.offset.
var zeroField = field([]int{})
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
// The pointer type is for the table-driven decoder.
// The implementation here uses a reflect.Value of pointer type to
// create a generic pointer. In pointer_unsafe.go we use unsafe
// instead of reflect to implement the same (but faster) interface.
type pointer struct {
v reflect.Value
}
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
return pointer{v: reflect.ValueOf(*i)}
}
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
v := reflect.ValueOf(*i)
u := reflect.New(v.Type())
u.Elem().Set(v)
return pointer{v: u}
}
// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
return pointer{v: v}
}
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}
func (p pointer) isNil() bool {
return p.v.IsNil()
}
// grow updates the slice s in place to make it one element longer.
// s must be addressable.
// Returns the (addressable) new element.
func grow(s reflect.Value) reflect.Value {
n, m := s.Len(), s.Cap()
if n < m {
s.SetLen(n + 1)
} else {
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
}
return s.Index(n)
}
func (p pointer) toInt64() *int64 {
return p.v.Interface().(*int64)
}
func (p pointer) toInt64Ptr() **int64 {
return p.v.Interface().(**int64)
}
func (p pointer) toInt64Slice() *[]int64 {
return p.v.Interface().(*[]int64)
}
var int32ptr = reflect.TypeOf((*int32)(nil))
func (p pointer) toInt32() *int32 {
return p.v.Convert(int32ptr).Interface().(*int32)
}
// The toInt32Ptr/Slice methods don't work because of enums.
// Instead, we must use set/get methods for the int32ptr/slice case.
/*
func (p pointer) toInt32Ptr() **int32 {
return p.v.Interface().(**int32)
}
func (p pointer) toInt32Slice() *[]int32 {
return p.v.Interface().(*[]int32)
}
*/
func (p pointer) getInt32Ptr() *int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().(*int32)
}
// an enum
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
}
func (p pointer) setInt32Ptr(v int32) {
// Allocate value in a *int32. Possibly convert that to a *enum.
// Then assign it to a **int32 or **enum.
// Note: we can convert *int32 to *enum, but we can't convert
// **int32 to **enum!
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
}
// getInt32Slice copies []int32 from p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getInt32Slice() []int32 {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
return p.v.Elem().Interface().([]int32)
}
// an enum
// Allocate a []int32, then assign []enum's values into it.
// Note: we can't convert []enum to []int32.
slice := p.v.Elem()
s := make([]int32, slice.Len())
for i := 0; i < slice.Len(); i++ {
s[i] = int32(slice.Index(i).Int())
}
return s
}
// setInt32Slice copies []int32 into p as a new slice.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setInt32Slice(v []int32) {
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
// raw int32 type
p.v.Elem().Set(reflect.ValueOf(v))
return
}
// an enum
// Allocate a []enum, then assign []int32's values into it.
// Note: we can't convert []enum to []int32.
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
for i, x := range v {
slice.Index(i).SetInt(int64(x))
}
p.v.Elem().Set(slice)
}
func (p pointer) appendInt32Slice(v int32) {
grow(p.v.Elem()).SetInt(int64(v))
}
func (p pointer) toUint64() *uint64 {
return p.v.Interface().(*uint64)
}
func (p pointer) toUint64Ptr() **uint64 {
return p.v.Interface().(**uint64)
}
func (p pointer) toUint64Slice() *[]uint64 {
return p.v.Interface().(*[]uint64)
}
func (p pointer) toUint32() *uint32 {
return p.v.Interface().(*uint32)
}
func (p pointer) toUint32Ptr() **uint32 {
return p.v.Interface().(**uint32)
}
func (p pointer) toUint32Slice() *[]uint32 {
return p.v.Interface().(*[]uint32)
}
func (p pointer) toBool() *bool {
return p.v.Interface().(*bool)
}
func (p pointer) toBoolPtr() **bool {
return p.v.Interface().(**bool)
}
func (p pointer) toBoolSlice() *[]bool {
return p.v.Interface().(*[]bool)
}
func (p pointer) toFloat64() *float64 {
return p.v.Interface().(*float64)
}
func (p pointer) toFloat64Ptr() **float64 {
return p.v.Interface().(**float64)
}
func (p pointer) toFloat64Slice() *[]float64 {
return p.v.Interface().(*[]float64)
}
func (p pointer) toFloat32() *float32 {
return p.v.Interface().(*float32)
}
func (p pointer) toFloat32Ptr() **float32 {
return p.v.Interface().(**float32)
}
func (p pointer) toFloat32Slice() *[]float32 {
return p.v.Interface().(*[]float32)
}
func (p pointer) toString() *string {
return p.v.Interface().(*string)
}
func (p pointer) toStringPtr() **string {
return p.v.Interface().(**string)
}
func (p pointer) toStringSlice() *[]string {
return p.v.Interface().(*[]string)
}
func (p pointer) toBytes() *[]byte {
return p.v.Interface().(*[]byte)
}
func (p pointer) toBytesSlice() *[][]byte {
return p.v.Interface().(*[][]byte)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return p.v.Interface().(*XXX_InternalExtensions)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return p.v.Interface().(*map[int32]Extension)
}
func (p pointer) getPointer() pointer {
return pointer{v: p.v.Elem()}
}
func (p pointer) setPointer(q pointer) {
p.v.Elem().Set(q.v)
}
func (p pointer) appendPointer(q pointer) {
grow(p.v.Elem()).Set(q.v)
}
// getPointerSlice copies []*T from p as a new []pointer.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) getPointerSlice() []pointer {
if p.v.IsNil() {
return nil
}
n := p.v.Elem().Len()
s := make([]pointer, n)
for i := 0; i < n; i++ {
s[i] = pointer{v: p.v.Elem().Index(i)}
}
return s
}
// setPointerSlice copies []pointer into p as a new []*T.
// This behavior differs from the implementation in pointer_unsafe.go.
func (p pointer) setPointerSlice(v []pointer) {
if v == nil {
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
return
}
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
for _, p := range v {
s = reflect.Append(s, p.v)
}
p.v.Elem().Set(s)
}
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
if p.v.Elem().IsNil() {
return pointer{v: p.v.Elem()}
}
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
}
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
// TODO: check that p.v.Type().Elem() == t?
return p.v
}
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
atomicLock.Lock()
defer atomicLock.Unlock()
return *p
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomicLock.Lock()
defer atomicLock.Unlock()
*p = v
}
var atomicLock sync.Mutex

View File

@ -1,308 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
package proto
import (
"reflect"
"sync/atomic"
"unsafe"
)
const unsafeAllowed = true
// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
return field(f.Offset)
}
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
// zeroField is a noop when calling pointer.offset.
const zeroField = field(0)
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
return f != invalidField
}
// The pointer type below is for the new table-driven encoder/decoder.
// The implementation here uses unsafe.Pointer to create a generic pointer.
// In pointer_reflect.go we use reflect instead of unsafe to implement
// the same (but slower) interface.
type pointer struct {
p unsafe.Pointer
}
// size of pointer
var ptrSize = unsafe.Sizeof(uintptr(0))
// toPointer converts an interface of pointer type to a pointer
// that points to the same target.
func toPointer(i *Message) pointer {
// Super-tricky - read pointer out of data word of interface value.
// Saves ~25ns over the equivalent:
// return valToPointer(reflect.ValueOf(*i))
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
// toAddrPointer converts an interface to a pointer that points to
// the interface data.
func toAddrPointer(i *interface{}, isptr bool) pointer {
// Super-tricky - read or get the address of data word of interface value.
if isptr {
// The interface is of pointer type, thus it is a direct interface.
// The data word is the pointer data itself. We take its address.
return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
}
// The interface is not of pointer type. The data word is the pointer
// to the data.
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
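// Illustrative sketch (assumption, not part of the original file): both
// conversions above rely on the runtime's two-word interface layout,
// (type word, data word); element [1] of the interface viewed as a
// [2]unsafe.Pointer is the data word, which for a pointer-typed interface is
// the message pointer itself. SomeMsg below is a hypothetical message type.
//
//   var m Message = new(SomeMsg)
//   p := toPointer(&m) // p.p now holds the *SomeMsg stored in m's data word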
// valToPointer converts v to a pointer. v must be of pointer type.
func valToPointer(v reflect.Value) pointer {
return pointer{p: unsafe.Pointer(v.Pointer())}
}
// offset converts from a pointer to a structure to a pointer to
// one of its fields.
func (p pointer) offset(f field) pointer {
// For safety, we should panic if !f.IsValid, however calling panic causes
// this to no longer be inlineable, which is a serious performance cost.
/*
if !f.IsValid() {
panic("invalid field")
}
*/
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}
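// Illustrative sketch (not from the original source): since a field is just a
// byte offset, reaching a struct member is plain pointer arithmetic. The
// example type below is hypothetical.
//
//   type example struct{ A, B int64 }
//   m := &example{}
//   f, _ := reflect.TypeOf(*m).FieldByName("B")
//   p := valToPointer(reflect.ValueOf(m))
//   *p.offset(toField(&f)).toInt64() = 7 // m.B is now 7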
func (p pointer) isNil() bool {
return p.p == nil
}
func (p pointer) toInt64() *int64 {
return (*int64)(p.p)
}
func (p pointer) toInt64Ptr() **int64 {
return (**int64)(p.p)
}
func (p pointer) toInt64Slice() *[]int64 {
return (*[]int64)(p.p)
}
func (p pointer) toInt32() *int32 {
return (*int32)(p.p)
}
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
/*
func (p pointer) toInt32Ptr() **int32 {
return (**int32)(p.p)
}
func (p pointer) toInt32Slice() *[]int32 {
return (*[]int32)(p.p)
}
*/
func (p pointer) getInt32Ptr() *int32 {
return *(**int32)(p.p)
}
func (p pointer) setInt32Ptr(v int32) {
*(**int32)(p.p) = &v
}
// getInt32Slice loads a []int32 from p.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getInt32Slice() []int32 {
return *(*[]int32)(p.p)
}
// setInt32Slice stores a []int32 to p.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setInt32Slice(v []int32) {
*(*[]int32)(p.p) = v
}
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
func (p pointer) appendInt32Slice(v int32) {
s := (*[]int32)(p.p)
*s = append(*s, v)
}
func (p pointer) toUint64() *uint64 {
return (*uint64)(p.p)
}
func (p pointer) toUint64Ptr() **uint64 {
return (**uint64)(p.p)
}
func (p pointer) toUint64Slice() *[]uint64 {
return (*[]uint64)(p.p)
}
func (p pointer) toUint32() *uint32 {
return (*uint32)(p.p)
}
func (p pointer) toUint32Ptr() **uint32 {
return (**uint32)(p.p)
}
func (p pointer) toUint32Slice() *[]uint32 {
return (*[]uint32)(p.p)
}
func (p pointer) toBool() *bool {
return (*bool)(p.p)
}
func (p pointer) toBoolPtr() **bool {
return (**bool)(p.p)
}
func (p pointer) toBoolSlice() *[]bool {
return (*[]bool)(p.p)
}
func (p pointer) toFloat64() *float64 {
return (*float64)(p.p)
}
func (p pointer) toFloat64Ptr() **float64 {
return (**float64)(p.p)
}
func (p pointer) toFloat64Slice() *[]float64 {
return (*[]float64)(p.p)
}
func (p pointer) toFloat32() *float32 {
return (*float32)(p.p)
}
func (p pointer) toFloat32Ptr() **float32 {
return (**float32)(p.p)
}
func (p pointer) toFloat32Slice() *[]float32 {
return (*[]float32)(p.p)
}
func (p pointer) toString() *string {
return (*string)(p.p)
}
func (p pointer) toStringPtr() **string {
return (**string)(p.p)
}
func (p pointer) toStringSlice() *[]string {
return (*[]string)(p.p)
}
func (p pointer) toBytes() *[]byte {
return (*[]byte)(p.p)
}
func (p pointer) toBytesSlice() *[][]byte {
return (*[][]byte)(p.p)
}
func (p pointer) toExtensions() *XXX_InternalExtensions {
return (*XXX_InternalExtensions)(p.p)
}
func (p pointer) toOldExtensions() *map[int32]Extension {
return (*map[int32]Extension)(p.p)
}
// getPointerSlice loads []*T from p as a []pointer.
// The value returned is aliased with the original slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) getPointerSlice() []pointer {
// Super-tricky - p should point to a []*T where T is a
// message type. We load it as []pointer.
return *(*[]pointer)(p.p)
}
// setPointerSlice stores []pointer into p as a []*T.
// The value set is aliased with the input slice.
// This behavior differs from the implementation in pointer_reflect.go.
func (p pointer) setPointerSlice(v []pointer) {
// Super-tricky - p should point to a []*T where T is a
// message type. We store it as []pointer.
*(*[]pointer)(p.p) = v
}
// getPointer loads the pointer at p and returns it.
func (p pointer) getPointer() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}
}
// setPointer stores the pointer q at p.
func (p pointer) setPointer(q pointer) {
*(*unsafe.Pointer)(p.p) = q.p
}
// append q to the slice pointed to by p.
func (p pointer) appendPointer(q pointer) {
s := (*[]unsafe.Pointer)(p.p)
*s = append(*s, q.p)
}
// getInterfacePointer returns a pointer that points to the
// interface data of the interface pointed by p.
func (p pointer) getInterfacePointer() pointer {
// Super-tricky - read pointer out of data word of interface value.
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
}
// asPointerTo returns a reflect.Value that is a pointer to an
// object of type t stored at p.
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
return reflect.NewAt(t, p.p)
}
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}

View File

@ -1,544 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
/*
* Routines for encoding data into the wire format for protocol buffers.
*/
import (
"fmt"
"log"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
)
const debug bool = false
// Constants that identify the encoding of a value on the wire.
const (
WireVarint = 0
WireFixed64 = 1
WireBytes = 2
WireStartGroup = 3
WireEndGroup = 4
WireFixed32 = 5
)
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
fastTags []int
slowTags map[int]int
}
// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024
func (p *tagMap) get(t int) (int, bool) {
if t > 0 && t < tagMapFastLimit {
if t >= len(p.fastTags) {
return 0, false
}
fi := p.fastTags[t]
return fi, fi >= 0
}
fi, ok := p.slowTags[t]
return fi, ok
}
func (p *tagMap) put(t int, fi int) {
if t > 0 && t < tagMapFastLimit {
for len(p.fastTags) < t+1 {
p.fastTags = append(p.fastTags, -1)
}
p.fastTags[t] = fi
return
}
if p.slowTags == nil {
p.slowTags = make(map[int]int)
}
p.slowTags[t] = fi
}
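// Illustrative sketch (not part of the original file): tag numbers below 1024
// land in the dense fastTags slice, larger ones fall back to the map.
//
//   var tm tagMap
//   tm.put(3, 0)            // stored in fastTags[3]
//   tm.put(100000, 1)       // stored in slowTags[100000]
//   fi, ok := tm.get(3)     // fi == 0, ok == true
//   fi, ok = tm.get(100000) // fi == 1, ok == true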
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
Prop []*Properties // properties for each field
reqCount int // required count
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
OneofTypes map[string]*OneofProperties
}
// OneofProperties represents information about a specific field in a oneof.
type OneofProperties struct {
Type reflect.Type // pointer to generated struct type for this oneof field
Field int // struct field number of the containing oneof in the message
Prop *Properties
}
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct {
Name string // name of the field, for error messages
OrigName string // original name before protocol compiler (always set)
JSONName string // name to use for JSON; determined by protoc
Wire string
WireType int
Tag int
Required bool
Optional bool
Repeated bool
Packed bool // relevant for repeated primitives only
Enum string // set for enum types only
proto3 bool // whether this is known to be a proto3 field; set for []byte only
oneof bool // whether this is a oneof field
Default string // default value
HasDefault bool // whether an explicit default was provided
stype reflect.Type // set for struct types only
sprop *StructProperties // set for struct types only
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
s += ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
}
if p.Optional {
s += ",opt"
}
if p.Repeated {
s += ",rep"
}
if p.Packed {
s += ",packed"
}
s += ",name=" + p.OrigName
if p.JSONName != p.OrigName {
s += ",json=" + p.JSONName
}
if p.proto3 {
s += ",proto3"
}
if p.oneof {
s += ",oneof"
}
if len(p.Enum) > 0 {
s += ",enum=" + p.Enum
}
if p.HasDefault {
s += ",def=" + p.Default
}
return s
}
// Parse populates p by parsing a string in the protobuf struct field tag style.
func (p *Properties) Parse(s string) {
// "bytes,49,opt,name=foo,def=hello!"
fields := strings.Split(s, ",") // breaks def=, but handled below.
if len(fields) < 2 {
fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
return
}
p.Wire = fields[0]
switch p.Wire {
case "varint":
p.WireType = WireVarint
case "fixed32":
p.WireType = WireFixed32
case "fixed64":
p.WireType = WireFixed64
case "zigzag32":
p.WireType = WireVarint
case "zigzag64":
p.WireType = WireVarint
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
default:
fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
return
}
var err error
p.Tag, err = strconv.Atoi(fields[1])
if err != nil {
return
}
outer:
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
case f == "req":
p.Required = true
case f == "opt":
p.Optional = true
case f == "rep":
p.Repeated = true
case f == "packed":
p.Packed = true
case strings.HasPrefix(f, "name="):
p.OrigName = f[5:]
case strings.HasPrefix(f, "json="):
p.JSONName = f[5:]
case strings.HasPrefix(f, "enum="):
p.Enum = f[5:]
case f == "proto3":
p.proto3 = true
case f == "oneof":
p.oneof = true
case strings.HasPrefix(f, "def="):
p.HasDefault = true
p.Default = f[4:] // rest of string
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
break outer
}
}
}
}
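// Illustrative sketch (not from the original source): a typical generated
// struct tag and the fields Parse derives from it.
//
//   var p Properties
//   p.Parse("varint,1,opt,name=id,json=id")
//   // p.Wire == "varint", p.WireType == WireVarint, p.Tag == 1,
//   // p.Optional == true, p.OrigName == "id", p.JSONName == "id"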
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
// setFieldProps initializes the field properties for submessages and maps.
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
switch t1 := typ; t1.Kind() {
case reflect.Ptr:
if t1.Elem().Kind() == reflect.Struct {
p.stype = t1.Elem()
}
case reflect.Slice:
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
p.stype = t2.Elem()
}
case reflect.Map:
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
p.mvalprop = &Properties{}
vtype := p.mtype.Elem()
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
// The value type is not a message (*T) or bytes ([]byte),
// so we need encoders for the pointer to this type.
vtype = reflect.PtrTo(vtype)
}
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
} else {
p.sprop = getPropertiesLocked(p.stype)
}
}
}
var (
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
)
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
}
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
if tag == "" {
return
}
p.Parse(tag)
p.setFieldProps(typ, f, lockGetProp)
}
var (
propertiesMu sync.RWMutex
propertiesMap = make(map[reflect.Type]*StructProperties)
)
// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
if t.Kind() != reflect.Struct {
panic("proto: type must have kind struct")
}
// Most calls to GetProperties in a long-running program will be
// retrieving details for types we have seen before.
propertiesMu.RLock()
sprop, ok := propertiesMap[t]
propertiesMu.RUnlock()
if ok {
if collectStats {
stats.Chit++
}
return sprop
}
propertiesMu.Lock()
sprop = getPropertiesLocked(t)
propertiesMu.Unlock()
return sprop
}
// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
if prop, ok := propertiesMap[t]; ok {
if collectStats {
stats.Chit++
}
return prop
}
if collectStats {
stats.Cmiss++
}
prop := new(StructProperties)
// in case of recursive protos, fill this in now.
propertiesMap[t] = prop
// build properties
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
p := new(Properties)
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
p.OrigName = oneof
}
prop.Prop[i] = p
prop.order[i] = i
if debug {
print(i, " ", f.Name, " ", t.String(), " ")
if p.Tag > 0 {
print(p.String())
}
print("\n")
}
}
// Re-order prop.order.
sort.Sort(prop)
type oneofMessage interface {
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
_, _, _, oots = om.XXX_OneofFuncs()
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
for _, oot := range oots {
oop := &OneofProperties{
Type: reflect.ValueOf(oot).Type(), // *T
Prop: new(Properties),
}
sft := oop.Type.Elem().Field(0)
oop.Prop.Name = sft.Name
oop.Prop.Parse(sft.Tag.Get("protobuf"))
// There will be exactly one interface field that
// this new value is assignable to.
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type.Kind() != reflect.Interface {
continue
}
if !oop.Type.AssignableTo(f.Type) {
continue
}
oop.Field = i
break
}
prop.OneofTypes[oop.Prop.OrigName] = oop
}
}
// build required counts
// build tags
reqCount := 0
prop.decoderOrigNames = make(map[string]int)
for i, p := range prop.Prop {
if strings.HasPrefix(p.Name, "XXX_") {
// Internal fields should not appear in tags/origNames maps.
// They are handled specially when encoding and decoding.
continue
}
if p.Required {
reqCount++
}
prop.decoderTags.put(p.Tag, i)
prop.decoderOrigNames[p.OrigName] = i
}
prop.reqCount = reqCount
return prop
}
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
var enumValueMaps = make(map[string]map[string]int32)
// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
if _, ok := enumValueMaps[typeName]; ok {
panic("proto: duplicate enum registered: " + typeName)
}
enumValueMaps[typeName] = valueMap
}
// EnumValueMap returns the mapping from names to integers of the
// enum type enumType, or nil if not found.
func EnumValueMap(enumType string) map[string]int32 {
return enumValueMaps[enumType]
}
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
if _, ok := protoTypedNils[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
// Generated code always calls RegisterType with nil x.
// This check is just for extra safety.
protoTypedNils[name] = x
} else {
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
}
revProtoTypes[t] = name
}
// RegisterMapType is called from generated code and maps from the fully qualified
// proto name to the native map type of the proto map definition.
func RegisterMapType(x interface{}, name string) {
if reflect.TypeOf(x).Kind() != reflect.Map {
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
}
if _, ok := protoMapTypes[name]; ok {
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
protoMapTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
func MessageName(x Message) string {
type xname interface {
XXX_MessageName() string
}
if m, ok := x.(xname); ok {
return m.XXX_MessageName()
}
return revProtoTypes[reflect.TypeOf(x)]
}
// MessageType returns the message type (pointer to struct) for a named message.
// The type is not guaranteed to implement proto.Message if the name refers to a
// map entry.
func MessageType(name string) reflect.Type {
if t, ok := protoTypedNils[name]; ok {
return reflect.TypeOf(t)
}
return protoMapTypes[name]
}
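// Illustrative sketch (assumption, not part of the original file): generated
// code registers each message under its fully-qualified name, which callers
// can later resolve back to a Go type. MyMsg is a hypothetical generated type,
// shown here as used from a client package.
//
//   proto.RegisterType((*MyMsg)(nil), "mypkg.MyMsg")
//   t := proto.MessageType("mypkg.MyMsg") // reflect.Type of *mypkg.MyMsg
//   name := proto.MessageName(&MyMsg{})   // "mypkg.MyMsg"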
// A registry of all linked proto files.
var (
protoFiles = make(map[string][]byte) // file name => fileDescriptor
)
// RegisterFile is called from generated code and maps from the
// full file name of a .proto file to its compressed FileDescriptorProto.
func RegisterFile(filename string, fileDescriptor []byte) {
protoFiles[filename] = fileDescriptor
}
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
func FileDescriptor(filename string) []byte { return protoFiles[filename] }

File diff suppressed because it is too large

View File

@ -1,654 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
"sync"
"sync/atomic"
)
// Merge merges the src message into dst.
// This assumes that dst and src are of the same type and are non-nil.
func (a *InternalMessageInfo) Merge(dst, src Message) {
mi := atomicLoadMergeInfo(&a.merge)
if mi == nil {
mi = getMergeInfo(reflect.TypeOf(dst).Elem())
atomicStoreMergeInfo(&a.merge, mi)
}
mi.merge(toPointer(&dst), toPointer(&src))
}
type mergeInfo struct {
typ reflect.Type
initialized int32 // 0: only typ is valid, 1: everything is valid
lock sync.Mutex
fields []mergeFieldInfo
unrecognized field // Offset of XXX_unrecognized
}
type mergeFieldInfo struct {
field field // Offset of field, guaranteed to be valid
// isPointer reports whether the value in the field is a pointer.
// This is true for the following situations:
// * Pointer to struct
// * Pointer to basic type (proto2 only)
// * Slice (first value in slice header is a pointer)
// * String (first value in string header is a pointer)
isPointer bool
// basicWidth reports the width of the field assuming that it is directly
// embedded in the struct (as is the case for basic types in proto3).
// The possible values are:
// 0: invalid
// 1: bool
// 4: int32, uint32, float32
// 8: int64, uint64, float64
basicWidth int
// Where dst and src are pointers to the types being merged.
merge func(dst, src pointer)
}
var (
mergeInfoMap = map[reflect.Type]*mergeInfo{}
mergeInfoLock sync.Mutex
)
func getMergeInfo(t reflect.Type) *mergeInfo {
mergeInfoLock.Lock()
defer mergeInfoLock.Unlock()
mi := mergeInfoMap[t]
if mi == nil {
mi = &mergeInfo{typ: t}
mergeInfoMap[t] = mi
}
return mi
}
// merge merges src into dst assuming they are both of type *mi.typ.
func (mi *mergeInfo) merge(dst, src pointer) {
if dst.isNil() {
panic("proto: nil destination")
}
if src.isNil() {
return // Nothing to do.
}
if atomic.LoadInt32(&mi.initialized) == 0 {
mi.computeMergeInfo()
}
for _, fi := range mi.fields {
sfp := src.offset(fi.field)
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
continue
}
if fi.basicWidth > 0 {
switch {
case fi.basicWidth == 1 && !*sfp.toBool():
continue
case fi.basicWidth == 4 && *sfp.toUint32() == 0:
continue
case fi.basicWidth == 8 && *sfp.toUint64() == 0:
continue
}
}
}
dfp := dst.offset(fi.field)
fi.merge(dfp, sfp)
}
// TODO: Make this faster?
out := dst.asPointerTo(mi.typ).Elem()
in := src.asPointerTo(mi.typ).Elem()
if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
mOut := emOut.extensionsWrite()
muIn.Lock()
mergeExtension(mOut, mIn)
muIn.Unlock()
}
}
if mi.unrecognized.IsValid() {
if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
}
}
}
func (mi *mergeInfo) computeMergeInfo() {
mi.lock.Lock()
defer mi.lock.Unlock()
if mi.initialized != 0 {
return
}
t := mi.typ
n := t.NumField()
props := GetProperties(t)
for i := 0; i < n; i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
mfi := mergeFieldInfo{field: toField(&f)}
tf := f.Type
// As an optimization, we can avoid the merge function call cost
// if we know for sure that the source will have no effect
// by checking if it is the zero value.
if unsafeAllowed {
switch tf.Kind() {
case reflect.Ptr, reflect.Slice, reflect.String:
// As a special case, we assume slices and strings are pointers
// since we know that the first field in the SliceHeader or
// StringHeader is a data pointer.
mfi.isPointer = true
case reflect.Bool:
mfi.basicWidth = 1
case reflect.Int32, reflect.Uint32, reflect.Float32:
mfi.basicWidth = 4
case reflect.Int64, reflect.Uint64, reflect.Float64:
mfi.basicWidth = 8
}
}
// Unwrap tf to get at its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic("both pointer and slice for basic type in " + tf.Name())
}
switch tf.Kind() {
case reflect.Int32:
switch {
case isSlice: // E.g., []int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
/*
sfsp := src.toInt32Slice()
if *sfsp != nil {
dfsp := dst.toInt32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
*/
sfs := src.getInt32Slice()
if sfs != nil {
dfs := dst.getInt32Slice()
dfs = append(dfs, sfs...)
if dfs == nil {
dfs = []int32{}
}
dst.setInt32Slice(dfs)
}
}
case isPointer: // E.g., *int32
mfi.merge = func(dst, src pointer) {
// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
/*
sfpp := src.toInt32Ptr()
if *sfpp != nil {
dfpp := dst.toInt32Ptr()
if *dfpp == nil {
*dfpp = Int32(**sfpp)
} else {
**dfpp = **sfpp
}
}
*/
sfp := src.getInt32Ptr()
if sfp != nil {
dfp := dst.getInt32Ptr()
if dfp == nil {
dst.setInt32Ptr(*sfp)
} else {
*dfp = *sfp
}
}
}
default: // E.g., int32
mfi.merge = func(dst, src pointer) {
if v := *src.toInt32(); v != 0 {
*dst.toInt32() = v
}
}
}
case reflect.Int64:
switch {
case isSlice: // E.g., []int64
mfi.merge = func(dst, src pointer) {
sfsp := src.toInt64Slice()
if *sfsp != nil {
dfsp := dst.toInt64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []int64{}
}
}
}
case isPointer: // E.g., *int64
mfi.merge = func(dst, src pointer) {
sfpp := src.toInt64Ptr()
if *sfpp != nil {
dfpp := dst.toInt64Ptr()
if *dfpp == nil {
*dfpp = Int64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., int64
mfi.merge = func(dst, src pointer) {
if v := *src.toInt64(); v != 0 {
*dst.toInt64() = v
}
}
}
case reflect.Uint32:
switch {
case isSlice: // E.g., []uint32
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint32Slice()
if *sfsp != nil {
dfsp := dst.toUint32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint32{}
}
}
}
case isPointer: // E.g., *uint32
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint32Ptr()
if *sfpp != nil {
dfpp := dst.toUint32Ptr()
if *dfpp == nil {
*dfpp = Uint32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint32
mfi.merge = func(dst, src pointer) {
if v := *src.toUint32(); v != 0 {
*dst.toUint32() = v
}
}
}
case reflect.Uint64:
switch {
case isSlice: // E.g., []uint64
mfi.merge = func(dst, src pointer) {
sfsp := src.toUint64Slice()
if *sfsp != nil {
dfsp := dst.toUint64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []uint64{}
}
}
}
case isPointer: // E.g., *uint64
mfi.merge = func(dst, src pointer) {
sfpp := src.toUint64Ptr()
if *sfpp != nil {
dfpp := dst.toUint64Ptr()
if *dfpp == nil {
*dfpp = Uint64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., uint64
mfi.merge = func(dst, src pointer) {
if v := *src.toUint64(); v != 0 {
*dst.toUint64() = v
}
}
}
case reflect.Float32:
switch {
case isSlice: // E.g., []float32
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat32Slice()
if *sfsp != nil {
dfsp := dst.toFloat32Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float32{}
}
}
}
case isPointer: // E.g., *float32
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat32Ptr()
if *sfpp != nil {
dfpp := dst.toFloat32Ptr()
if *dfpp == nil {
*dfpp = Float32(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float32
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat32(); v != 0 {
*dst.toFloat32() = v
}
}
}
case reflect.Float64:
switch {
case isSlice: // E.g., []float64
mfi.merge = func(dst, src pointer) {
sfsp := src.toFloat64Slice()
if *sfsp != nil {
dfsp := dst.toFloat64Slice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []float64{}
}
}
}
case isPointer: // E.g., *float64
mfi.merge = func(dst, src pointer) {
sfpp := src.toFloat64Ptr()
if *sfpp != nil {
dfpp := dst.toFloat64Ptr()
if *dfpp == nil {
*dfpp = Float64(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., float64
mfi.merge = func(dst, src pointer) {
if v := *src.toFloat64(); v != 0 {
*dst.toFloat64() = v
}
}
}
case reflect.Bool:
switch {
case isSlice: // E.g., []bool
mfi.merge = func(dst, src pointer) {
sfsp := src.toBoolSlice()
if *sfsp != nil {
dfsp := dst.toBoolSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []bool{}
}
}
}
case isPointer: // E.g., *bool
mfi.merge = func(dst, src pointer) {
sfpp := src.toBoolPtr()
if *sfpp != nil {
dfpp := dst.toBoolPtr()
if *dfpp == nil {
*dfpp = Bool(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., bool
mfi.merge = func(dst, src pointer) {
if v := *src.toBool(); v {
*dst.toBool() = v
}
}
}
case reflect.String:
switch {
case isSlice: // E.g., []string
mfi.merge = func(dst, src pointer) {
sfsp := src.toStringSlice()
if *sfsp != nil {
dfsp := dst.toStringSlice()
*dfsp = append(*dfsp, *sfsp...)
if *dfsp == nil {
*dfsp = []string{}
}
}
}
case isPointer: // E.g., *string
mfi.merge = func(dst, src pointer) {
sfpp := src.toStringPtr()
if *sfpp != nil {
dfpp := dst.toStringPtr()
if *dfpp == nil {
*dfpp = String(**sfpp)
} else {
**dfpp = **sfpp
}
}
}
default: // E.g., string
mfi.merge = func(dst, src pointer) {
if v := *src.toString(); v != "" {
*dst.toString() = v
}
}
}
case reflect.Slice:
isProto3 := props.Prop[i].proto3
switch {
case isPointer:
panic("bad pointer in byte slice case in " + tf.Name())
case tf.Elem().Kind() != reflect.Uint8:
panic("bad element kind in byte slice case in " + tf.Name())
case isSlice: // E.g., [][]byte
mfi.merge = func(dst, src pointer) {
sbsp := src.toBytesSlice()
if *sbsp != nil {
dbsp := dst.toBytesSlice()
for _, sb := range *sbsp {
if sb == nil {
*dbsp = append(*dbsp, nil)
} else {
*dbsp = append(*dbsp, append([]byte{}, sb...))
}
}
if *dbsp == nil {
*dbsp = [][]byte{}
}
}
}
default: // E.g., []byte
mfi.merge = func(dst, src pointer) {
sbp := src.toBytes()
if *sbp != nil {
dbp := dst.toBytes()
if !isProto3 || len(*sbp) > 0 {
*dbp = append([]byte{}, *sbp...)
}
}
}
}
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("message field %s without pointer", tf))
case isSlice: // E.g., []*pb.T
mi := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sps := src.getPointerSlice()
if sps != nil {
dps := dst.getPointerSlice()
for _, sp := range sps {
var dp pointer
if !sp.isNil() {
dp = valToPointer(reflect.New(tf))
mi.merge(dp, sp)
}
dps = append(dps, dp)
}
if dps == nil {
dps = []pointer{}
}
dst.setPointerSlice(dps)
}
}
default: // E.g., *pb.T
mi := getMergeInfo(tf)
mfi.merge = func(dst, src pointer) {
sp := src.getPointer()
if !sp.isNil() {
dp := dst.getPointer()
if dp.isNil() {
dp = valToPointer(reflect.New(tf))
dst.setPointer(dp)
}
mi.merge(dp, sp)
}
}
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic("bad pointer or slice in map case in " + tf.Name())
default: // E.g., map[K]V
mfi.merge = func(dst, src pointer) {
sm := src.asPointerTo(tf).Elem()
if sm.Len() == 0 {
return
}
dm := dst.asPointerTo(tf).Elem()
if dm.IsNil() {
dm.Set(reflect.MakeMap(tf))
}
switch tf.Elem().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(Clone(val.Interface().(Message)))
dm.SetMapIndex(key, val)
}
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
dm.SetMapIndex(key, val)
}
default: // Basic type (e.g., string)
for _, key := range sm.MapKeys() {
val := sm.MapIndex(key)
dm.SetMapIndex(key, val)
}
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic("bad pointer or slice in interface case in " + tf.Name())
default: // E.g., interface{}
// TODO: Make this faster?
mfi.merge = func(dst, src pointer) {
su := src.asPointerTo(tf).Elem()
if !su.IsNil() {
du := dst.asPointerTo(tf).Elem()
typ := su.Elem().Type()
if du.IsNil() || du.Elem().Type() != typ {
du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
}
sv := su.Elem().Elem().Field(0)
if sv.Kind() == reflect.Ptr && sv.IsNil() {
return
}
dv := du.Elem().Elem().Field(0)
if dv.Kind() == reflect.Ptr && dv.IsNil() {
dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
}
switch sv.Type().Kind() {
case reflect.Ptr: // Proto struct (e.g., *T)
Merge(dv.Interface().(Message), sv.Interface().(Message))
case reflect.Slice: // E.g. Bytes type (e.g., []byte)
dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
default: // Basic type (e.g., string)
dv.Set(sv)
}
}
}
}
default:
panic(fmt.Sprintf("merger not found for type:%s", tf))
}
mi.fields = append(mi.fields, mfi)
}
mi.unrecognized = invalidField
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
if f.Type != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
mi.unrecognized = toField(&f)
}
atomic.StoreInt32(&mi.initialized, 1)
}
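// Illustrative sketch (assumption, not from the original source): the per-kind
// merge functions built above give proto.Merge its usual semantics: non-zero
// scalars in src overwrite dst, repeated fields are appended, and messages are
// merged recursively. pb.T below is a hypothetical generated type.
//
//   dst := &pb.T{Name: "a", Tags: []int32{1}}
//   src := &pb.T{Tags: []int32{2}}
//   proto.Merge(dst, src)
//   // dst.Name == "a" (zero value in src is skipped), dst.Tags == []int32{1, 2}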

File diff suppressed because it is too large

View File

@ -1,843 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for writing the text protocol buffer format.
import (
"bufio"
"bytes"
"encoding"
"errors"
"fmt"
"io"
"log"
"math"
"reflect"
"sort"
"strings"
)
var (
newline = []byte("\n")
spaces = []byte(" ")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
backslashT = []byte{'\\', 't'}
backslashDQ = []byte{'\\', '"'}
backslashBS = []byte{'\\', '\\'}
posInf = []byte("inf")
negInf = []byte("-inf")
nan = []byte("nan")
)
type writer interface {
io.Writer
WriteByte(byte) error
}
// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
ind int
complete bool // if the current position is a complete line
compact bool // whether to write out as a one-liner
w writer
}
func (w *textWriter) WriteString(s string) (n int, err error) {
if !strings.Contains(s, "\n") {
if !w.compact && w.complete {
w.writeIndent()
}
w.complete = false
return io.WriteString(w.w, s)
}
// WriteString is typically called without newlines, so this
// codepath and its copy are rare. We copy to avoid
// duplicating all of Write's logic here.
return w.Write([]byte(s))
}
func (w *textWriter) Write(p []byte) (n int, err error) {
newlines := bytes.Count(p, newline)
if newlines == 0 {
if !w.compact && w.complete {
w.writeIndent()
}
n, err = w.w.Write(p)
w.complete = false
return n, err
}
frags := bytes.SplitN(p, newline, newlines+1)
if w.compact {
for i, frag := range frags {
if i > 0 {
if err := w.w.WriteByte(' '); err != nil {
return n, err
}
n++
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
}
return n, nil
}
for i, frag := range frags {
if w.complete {
w.writeIndent()
}
nn, err := w.w.Write(frag)
n += nn
if err != nil {
return n, err
}
if i+1 < len(frags) {
if err := w.w.WriteByte('\n'); err != nil {
return n, err
}
n++
}
}
w.complete = len(frags[len(frags)-1]) == 0
return n, nil
}
func (w *textWriter) WriteByte(c byte) error {
if w.compact && c == '\n' {
c = ' '
}
if !w.compact && w.complete {
w.writeIndent()
}
err := w.w.WriteByte(c)
w.complete = c == '\n'
return err
}
func (w *textWriter) indent() { w.ind++ }
func (w *textWriter) unindent() {
if w.ind == 0 {
log.Print("proto: textWriter unindented too far")
return
}
w.ind--
}
func writeName(w *textWriter, props *Properties) error {
if _, err := w.WriteString(props.OrigName); err != nil {
return err
}
if props.Wire != "group" {
return w.WriteByte(':')
}
return nil
}
func requiresQuotes(u string) bool {
// When the type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
switch {
case ch == '.' || ch == '/' || ch == '_':
continue
case '0' <= ch && ch <= '9':
continue
case 'A' <= ch && ch <= 'Z':
continue
case 'a' <= ch && ch <= 'z':
continue
default:
return true
}
}
return false
}
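// Illustrative sketch (not part of the original file): type URLs made only of
// letters, digits, '.', '/' and '_' print bare; anything else gets quoted.
//
//   requiresQuotes("type.googleapis.com/google.protobuf.Duration") // false
//   requiresQuotes("type.googleapis.com/has a space")              // true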
// isAny reports whether sv is a google.protobuf.Any message
func isAny(sv reflect.Value) bool {
type wkt interface {
XXX_WellKnownType() string
}
t, ok := sv.Addr().Interface().(wkt)
return ok && t.XXX_WellKnownType() == "Any"
}
// writeProto3Any writes an expanded google.protobuf.Any message.
//
// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
// required messages are not linked in).
//
// It returns (true, error) when sv was written in expanded format or an error
// was encountered.
func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
turl := sv.FieldByName("TypeUrl")
val := sv.FieldByName("Value")
if !turl.IsValid() || !val.IsValid() {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
b, ok := val.Interface().([]byte)
if !ok {
return true, errors.New("proto: invalid google.protobuf.Any message")
}
parts := strings.Split(turl.String(), "/")
mt := MessageType(parts[len(parts)-1])
if mt == nil {
return false, nil
}
m := reflect.New(mt.Elem())
if err := Unmarshal(b, m.Interface().(Message)); err != nil {
return false, nil
}
w.Write([]byte("["))
u := turl.String()
if requiresQuotes(u) {
writeString(w, u)
} else {
w.Write([]byte(u))
}
if w.compact {
w.Write([]byte("]:<"))
} else {
w.Write([]byte("]: <\n"))
w.ind++
}
if err := tm.writeStruct(w, m.Elem()); err != nil {
return true, err
}
if w.compact {
w.Write([]byte("> "))
} else {
w.ind--
w.Write([]byte(">\n"))
}
return true, nil
}
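// Illustrative sketch (assumption, not from the original source): with
// ExpandAny set and the target type linked in, an Any field is rendered with
// its type URL in brackets followed by the expanded message, roughly:
//
//   [type.googleapis.com/mypkg.MyMsg]: <
//     some_field: "x"
//   >
//
// If the type cannot be resolved, the caller falls back to printing the raw
// type_url and value fields.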
func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
if tm.ExpandAny && isAny(sv) {
if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
return err
}
}
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < sv.NumField(); i++ {
fv := sv.Field(i)
props := sprops.Prop[i]
name := st.Field(i).Name
if name == "XXX_NoUnkeyedLiteral" {
continue
}
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
// XXX_extensions map[int32]proto.Extension
// The first is handled here;
// the second is handled at the bottom of this function.
if name == "XXX_unrecognized" && !fv.IsNil() {
if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Field not filled in. This could be an optional field or
// a required field that wasn't filled in. Either way, there
// isn't anything we can show for it.
continue
}
if fv.Kind() == reflect.Slice && fv.IsNil() {
// Repeated field that is empty, or a bytes field that is unused.
continue
}
if props.Repeated && fv.Kind() == reflect.Slice {
// Repeated field.
for j := 0; j < fv.Len(); j++ {
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
v := fv.Index(j)
if v.Kind() == reflect.Ptr && v.IsNil() {
// A nil message in a repeated field is not valid,
// but we can handle that more gracefully than panicking.
if _, err := w.Write([]byte("<nil>\n")); err != nil {
return err
}
continue
}
if err := tm.writeAny(w, v, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if fv.Kind() == reflect.Map {
// Map fields are rendered as a repeated struct with key/value fields.
keys := fv.MapKeys()
sort.Sort(mapKeys(keys))
for _, key := range keys {
val := fv.MapIndex(key)
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// open struct
if err := w.WriteByte('<'); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
// key
if _, err := w.WriteString("key:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
// nil values aren't legal, but we can avoid panicking because of them.
if val.Kind() != reflect.Ptr || !val.IsNil() {
// value
if _, err := w.WriteString("value:"); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, val, props.mvalprop); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// close struct
w.unindent()
if err := w.WriteByte('>'); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
continue
}
if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
// empty bytes field
continue
}
if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
// proto3 non-repeated scalar field; skip if zero value
if isProto3Zero(fv) {
continue
}
}
if fv.Kind() == reflect.Interface {
// Check if it is a oneof.
if st.Field(i).Tag.Get("protobuf_oneof") != "" {
// fv is nil, or holds a pointer to generated struct.
// That generated struct has exactly one field,
// which has a protobuf struct tag.
if fv.IsNil() {
continue
}
inner := fv.Elem().Elem() // interface -> *T -> T
tag := inner.Type().Field(0).Tag.Get("protobuf")
props = new(Properties) // Overwrite the outer props var, but not its pointee.
props.Parse(tag)
// Write the value in the oneof, not the oneof itself.
fv = inner.Field(0)
// Special case to cope with malformed messages gracefully:
// If the value in the oneof is a nil pointer, don't panic
// in writeAny.
if fv.Kind() == reflect.Ptr && fv.IsNil() {
// Use errors.New so writeAny won't render quotes.
msg := errors.New("/* nil */")
fv = reflect.ValueOf(&msg).Elem()
}
}
}
if err := writeName(w, props); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
}
// Extensions (the XXX_extensions field).
pv := sv.Addr()
if _, err := extendable(pv.Interface()); err == nil {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
}
return nil
}
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
// Floats have special cases.
if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
x := v.Float()
var b []byte
switch {
case math.IsInf(x, 1):
b = posInf
case math.IsInf(x, -1):
b = negInf
case math.IsNaN(x):
b = nan
}
if b != nil {
_, err := w.Write(b)
return err
}
// Other values are handled below.
}
// We don't attempt to serialise every possible value type; only those
// that can occur in protocol buffers.
switch v.Kind() {
case reflect.Slice:
// Should only be a []byte; repeated fields are handled in writeStruct.
if err := writeString(w, string(v.Bytes())); err != nil {
return err
}
case reflect.String:
if err := writeString(w, v.String()); err != nil {
return err
}
case reflect.Struct:
// Required/optional group/message.
var bra, ket byte = '<', '>'
if props != nil && props.Wire == "group" {
bra, ket = '{', '}'
}
if err := w.WriteByte(bra); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte('\n'); err != nil {
return err
}
}
w.indent()
if v.CanAddr() {
// Calling v.Interface on a struct causes the reflect package to
// copy the entire struct. This is racy with the new Marshaler
// since we atomically update the XXX_sizecache.
//
// Thus, we retrieve a pointer to the struct if possible to avoid
// a race since v.Interface on the pointer doesn't copy the struct.
//
// If v is not addressable, then we are not worried about a race
// since it implies that the binary Marshaler cannot possibly be
// mutating this value.
v = v.Addr()
}
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = w.Write(text); err != nil {
return err
}
} else {
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if err := tm.writeStruct(w, v); err != nil {
return err
}
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
return err
}
default:
_, err := fmt.Fprint(w, v.Interface())
return err
}
return nil
}
// equivalent to C's isprint.
func isprint(c byte) bool {
return c >= 0x20 && c < 0x7f
}
// writeString writes a string in the protocol buffer text format.
// It is similar to strconv.Quote except we don't use Go escape sequences,
// we treat the string as a byte sequence, and we use octal escapes.
// These differences are to maintain interoperability with the other
// languages' implementations of the text format.
func writeString(w *textWriter, s string) error {
// use WriteByte here to get any needed indent
if err := w.WriteByte('"'); err != nil {
return err
}
// Loop over the bytes, not the runes.
for i := 0; i < len(s); i++ {
var err error
// Divergence from C++: we don't escape apostrophes.
// There's no need to escape them, and the C++ parser
// copes with a naked apostrophe.
switch c := s[i]; c {
case '\n':
_, err = w.w.Write(backslashN)
case '\r':
_, err = w.w.Write(backslashR)
case '\t':
_, err = w.w.Write(backslashT)
case '"':
_, err = w.w.Write(backslashDQ)
case '\\':
_, err = w.w.Write(backslashBS)
default:
if isprint(c) {
err = w.w.WriteByte(c)
} else {
_, err = fmt.Fprintf(w.w, "\\%03o", c)
}
}
if err != nil {
return err
}
}
return w.WriteByte('"')
}
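// For illustration of writeString: writeString(w, "café\n") should emit
// "caf\303\251\n", including the surrounding double quotes. Printable ASCII
// passes through unchanged, the newline is written as \n, and the two UTF-8
// bytes of 'é' (0xC3, 0xA9) become the octal escapes \303 and \251, matching
// the C++ text-format tokenizer rather than strconv.Quote.
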
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
if !w.compact {
if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
return err
}
}
b := NewBuffer(data)
for b.index < len(b.buf) {
x, err := b.DecodeVarint()
if err != nil {
_, err := fmt.Fprintf(w, "/* %v */\n", err)
return err
}
wire, tag := x&7, x>>3
if wire == WireEndGroup {
w.unindent()
if _, err := w.Write(endBraceNewline); err != nil {
return err
}
continue
}
if _, err := fmt.Fprint(w, tag); err != nil {
return err
}
if wire != WireStartGroup {
if err := w.WriteByte(':'); err != nil {
return err
}
}
if !w.compact || wire == WireStartGroup {
if err := w.WriteByte(' '); err != nil {
return err
}
}
switch wire {
case WireBytes:
buf, e := b.DecodeRawBytes(false)
if e == nil {
_, err = fmt.Fprintf(w, "%q", buf)
} else {
_, err = fmt.Fprintf(w, "/* %v */", e)
}
case WireFixed32:
x, err = b.DecodeFixed32()
err = writeUnknownInt(w, x, err)
case WireFixed64:
x, err = b.DecodeFixed64()
err = writeUnknownInt(w, x, err)
case WireStartGroup:
err = w.WriteByte('{')
w.indent()
case WireVarint:
x, err = b.DecodeVarint()
err = writeUnknownInt(w, x, err)
default:
_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
}
if err != nil {
return err
}
if err = w.WriteByte('\n'); err != nil {
return err
}
}
return nil
}
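// For illustration of writeUnknownStruct: raw bytes holding a single unknown
// varint field with number 4 and value 150 render in non-compact mode roughly
// as
//
//	/* 3 unknown bytes */
//	4: 150
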
func writeUnknownInt(w *textWriter, x uint64, err error) error {
if err == nil {
_, err = fmt.Fprint(w, x)
} else {
_, err = fmt.Fprintf(w, "/* %v */", err)
}
return err
}
type int32Slice []int32
func (s int32Slice) Len() int { return len(s) }
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
emap := extensionMaps[pv.Type().Elem()]
ep, _ := extendable(pv.Interface())
// Order the extensions by ID.
// This isn't strictly necessary, but it will give us
// canonical output, which will also make testing easier.
m, mu := ep.extensionsRead()
if m == nil {
return nil
}
mu.Lock()
ids := make([]int32, 0, len(m))
for id := range m {
ids = append(ids, id)
}
sort.Sort(int32Slice(ids))
mu.Unlock()
for _, extNum := range ids {
ext := m[extNum]
var desc *ExtensionDesc
if emap != nil {
desc = emap[extNum]
}
if desc == nil {
// Unknown extension.
if err := writeUnknownStruct(w, ext.enc); err != nil {
return err
}
continue
}
pb, err := GetExtension(ep, desc)
if err != nil {
return fmt.Errorf("failed getting extension: %v", err)
}
// Repeated extensions will appear as a slice.
if !desc.repeated() {
if err := tm.writeExtension(w, desc.Name, pb); err != nil {
return err
}
} else {
v := reflect.ValueOf(pb)
for i := 0; i < v.Len(); i++ {
if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
return err
}
}
}
}
return nil
}
func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
return err
}
if !w.compact {
if err := w.WriteByte(' '); err != nil {
return err
}
}
if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
return err
}
if err := w.WriteByte('\n'); err != nil {
return err
}
return nil
}
func (w *textWriter) writeIndent() {
if !w.complete {
return
}
remain := w.ind * 2
for remain > 0 {
n := remain
if n > len(spaces) {
n = len(spaces)
}
w.w.Write(spaces[:n])
remain -= n
}
w.complete = false
}
// TextMarshaler is a configurable text format marshaler.
type TextMarshaler struct {
Compact bool // use compact text format (one line).
ExpandAny bool // expand google.protobuf.Any messages of known types
}
// Marshal writes a given protocol buffer in text format.
// The only errors returned are from w.
func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
val := reflect.ValueOf(pb)
if pb == nil || val.IsNil() {
w.Write([]byte("<nil>"))
return nil
}
var bw *bufio.Writer
ww, ok := w.(writer)
if !ok {
bw = bufio.NewWriter(w)
ww = bw
}
aw := &textWriter{
w: ww,
complete: true,
compact: tm.Compact,
}
if etm, ok := pb.(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
return err
}
if _, err = aw.Write(text); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Dereference the received pointer so we don't have outer < and >.
v := reflect.Indirect(val)
if err := tm.writeStruct(aw, v); err != nil {
return err
}
if bw != nil {
return bw.Flush()
}
return nil
}
// Text is the same as Marshal, but returns the string directly.
func (tm *TextMarshaler) Text(pb Message) string {
var buf bytes.Buffer
tm.Marshal(&buf, pb)
return buf.String()
}
var (
defaultTextMarshaler = TextMarshaler{}
compactTextMarshaler = TextMarshaler{Compact: true}
)
// TODO: consider removing some of the Marshal functions below.
// MarshalText writes a given protocol buffer in text format.
// The only errors returned are from w.
func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
// MarshalTextString is the same as MarshalText, but returns the string directly.
func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
// CompactText writes a given protocol buffer in compact text format (one line).
func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
// CompactTextString is the same as CompactText, but returns the string directly.
func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
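
The exported helpers above are typically used as in the following sketch. The proto2-style generated message type pb.Greeting, its fields, and its import path are hypothetical placeholders; only the proto package calls come from the code in this file.

package main

import (
	"os"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/greetingpb" // hypothetical generated package
)

func main() {
	msg := &pb.Greeting{Text: proto.String("hello")}

	// Multi-line text form, written directly to stdout.
	if err := proto.MarshalText(os.Stdout, msg); err != nil {
		panic(err)
	}

	// Compact, single-line form as a string.
	line := proto.CompactTextString(msg)

	// The same result via an explicitly configured marshaler.
	tm := proto.TextMarshaler{Compact: true}
	if line != tm.Text(msg) {
		panic("compact forms should match")
	}
}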

View File

@ -1,880 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
// Functions for parsing the Text protocol buffer format.
// TODO: message sets.
import (
"encoding"
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
// Error string emitted when deserializing Any and fields are already set
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
type ParseError struct {
Message string
Line int // 1-based line number
Offset int // 0-based byte offset from start of input
}
func (p *ParseError) Error() string {
if p.Line == 1 {
// show offset only for first line
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
}
type token struct {
value string
err *ParseError
line int // line number
offset int // byte number from start of input, not start of line
unquoted string // the unquoted version of value, if it was a quoted string
}
func (t *token) String() string {
if t.err == nil {
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
}
return fmt.Sprintf("parse error: %v", t.err)
}
type textParser struct {
s string // remaining input
done bool // whether the parsing is finished (success or error)
backed bool // whether back() was called
offset, line int
cur token
}
func newTextParser(s string) *textParser {
p := new(textParser)
p.s = s
p.line = 1
p.cur.line = 1
return p
}
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
p.cur.err = pe
p.done = true
return pe
}
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
func isIdentOrNumberChar(c byte) bool {
switch {
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
return true
case '0' <= c && c <= '9':
return true
}
switch c {
case '-', '+', '.', '_':
return true
}
return false
}
func isWhitespace(c byte) bool {
switch c {
case ' ', '\t', '\n', '\r':
return true
}
return false
}
func isQuote(c byte) bool {
switch c {
case '"', '\'':
return true
}
return false
}
func (p *textParser) skipWhitespace() {
i := 0
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
if p.s[i] == '#' {
// comment; skip to end of line or input
for i < len(p.s) && p.s[i] != '\n' {
i++
}
if i == len(p.s) {
break
}
}
if p.s[i] == '\n' {
p.line++
}
i++
}
p.offset += i
p.s = p.s[i:len(p.s)]
if len(p.s) == 0 {
p.done = true
}
}
func (p *textParser) advance() {
// Skip whitespace
p.skipWhitespace()
if p.done {
return
}
// Start of non-whitespace
p.cur.err = nil
p.cur.offset, p.cur.line = p.offset, p.line
p.cur.unquoted = ""
switch p.s[0] {
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
// Single symbol
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
case '"', '\'':
// Quoted string
i := 1
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
if p.s[i] == '\\' && i+1 < len(p.s) {
// skip escaped char
i++
}
i++
}
if i >= len(p.s) || p.s[i] != p.s[0] {
p.errorf("unmatched quote")
return
}
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
if err != nil {
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
return
}
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
p.cur.unquoted = unq
default:
i := 0
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
i++
}
if i == 0 {
p.errorf("unexpected byte %#x", p.s[0])
return
}
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
}
p.offset += len(p.cur.value)
}
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
)
func unquoteC(s string, quote rune) (string, error) {
// This is based on C++'s tokenizer.cc.
// Despite its name, this is *not* parsing C syntax.
// For instance, "\0" is an invalid quoted string.
// Avoid allocation in trivial cases.
simple := true
for _, r := range s {
if r == '\\' || r == quote {
simple = false
break
}
}
if simple {
return s, nil
}
buf := make([]byte, 0, 3*len(s)/2)
for len(s) > 0 {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", errBadUTF8
}
s = s[n:]
if r != '\\' {
if r < utf8.RuneSelf {
buf = append(buf, byte(r))
} else {
buf = append(buf, string(r)...)
}
continue
}
ch, tail, err := unescape(s)
if err != nil {
return "", err
}
buf = append(buf, ch...)
s = tail
}
return string(buf), nil
}
func unescape(s string) (ch string, tail string, err error) {
r, n := utf8.DecodeRuneInString(s)
if r == utf8.RuneError && n == 1 {
return "", "", errBadUTF8
}
s = s[n:]
switch r {
case 'a':
return "\a", s, nil
case 'b':
return "\b", s, nil
case 'f':
return "\f", s, nil
case 'n':
return "\n", s, nil
case 'r':
return "\r", s, nil
case 't':
return "\t", s, nil
case 'v':
return "\v", s, nil
case '?':
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
ss := string(r) + s[:2]
s = s[2:]
i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
case 'x', 'X', 'u', 'U':
var n int
switch r {
case 'x', 'X':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
if len(s) < n {
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
ss := s[:n]
s = s[n:]
i, err := strconv.ParseUint(ss, 16, 64)
if err != nil {
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
}
if r == 'x' || r == 'X' {
return string([]byte{byte(i)}), s, nil
}
if i > utf8.MaxRune {
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
}
return string(i), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
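// For illustration of unescape, given the text that remains after unquoteC has
// consumed a backslash: unescape("101") should yield "A" (octal),
// unescape("x41") also "A" (hex), and unescape("u00e9") should yield "é"
// (a Unicode code point), each together with the unconsumed tail of the input.
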
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
if p.backed || p.done {
p.backed = false
return &p.cur
}
p.advance()
if p.done {
p.cur.value = ""
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
// Look for multiple quoted strings separated by whitespace,
// and concatenate them.
cat := p.cur
for {
p.skipWhitespace()
if p.done || !isQuote(p.s[0]) {
break
}
p.advance()
if p.cur.err != nil {
return &p.cur
}
cat.value += " " + p.cur.value
cat.unquoted += p.cur.unquoted
}
p.done = false // parser may have seen EOF, but we want to return cat
p.cur = cat
}
return &p.cur
}
func (p *textParser) consumeToken(s string) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != s {
p.back()
return p.errorf("expected %q, found %q", s, tok.value)
}
return nil
}
// Return a RequiredNotSetError indicating which required field was not set.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
st := sv.Type()
sprops := GetProperties(st)
for i := 0; i < st.NumField(); i++ {
if !isNil(sv.Field(i)) {
continue
}
props := sprops.Prop[i]
if props.Required {
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
}
}
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
}
// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
i, ok := sprops.decoderOrigNames[name]
if ok {
return i, sprops.Prop[i], true
}
return -1, nil, false
}
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ":" {
// Colon is optional when the field is a group or message.
needColon := true
switch props.Wire {
case "group":
needColon = false
case "bytes":
// A "bytes" field is either a message, a string, or a repeated field;
// those three become *T, *string and []T respectively, so we can check for
// this field being a pointer to a non-string.
if typ.Kind() == reflect.Ptr {
// *T or *string
if typ.Elem().Kind() == reflect.String {
break
}
} else if typ.Kind() == reflect.Slice {
// []T or []*T
if typ.Elem().Kind() != reflect.Ptr {
break
}
} else if typ.Kind() == reflect.String {
// The proto3 exception is for a string field,
// which requires a colon.
break
}
needColon = false
}
if needColon {
return p.errorf("expected ':', found %q", tok.value)
}
p.back()
}
return nil
}
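// For illustration of checkForColon: a scalar field must be written with a
// colon, e.g. `count: 3`, while a message or group field may be written either
// `inner: { ... }` or simply `inner { ... }`; the proto3 exception above is a
// plain string field, which keeps the mandatory colon.
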
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
st := sv.Type()
sprops := GetProperties(st)
reqCount := sprops.reqCount
var reqFieldErr error
fieldSet := make(map[string]bool)
// A struct is a sequence of "name: value", terminated by one of
// '>' or '}', or the end of the input. A name may also be
// "[extension]" or "[type/url]".
//
// The whole struct can also be an expanded Any message, like:
// [type/url] < ... struct contents ... >
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
if tok.value == "[" {
// Looks like an extension or an Any.
//
// TODO: Check whether we need to handle
// namespace rooted names (e.g. ".something.Foo").
extName, err := p.consumeExtName()
if err != nil {
return err
}
if s := strings.LastIndex(extName, "/"); s >= 0 {
// If it contains a slash, it's an Any type URL.
messageName := extName[s+1:]
mt := MessageType(messageName)
if mt == nil {
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
}
tok = p.next()
if tok.err != nil {
return tok.err
}
// consume an optional colon
if tok.value == ":" {
tok = p.next()
if tok.err != nil {
return tok.err
}
}
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
v := reflect.New(mt.Elem())
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
return pe
}
b, err := Marshal(v.Interface().(Message))
if err != nil {
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
}
if fieldSet["type_url"] {
return p.errorf(anyRepeatedlyUnpacked, "type_url")
}
if fieldSet["value"] {
return p.errorf(anyRepeatedlyUnpacked, "value")
}
sv.FieldByName("TypeUrl").SetString(extName)
sv.FieldByName("Value").SetBytes(b)
fieldSet["type_url"] = true
fieldSet["value"] = true
continue
}
var desc *ExtensionDesc
// This could be faster, but it's functional.
// TODO: Do something smarter than a linear scan.
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
if d.Name == extName {
desc = d
break
}
}
if desc == nil {
return p.errorf("unrecognized extension %q", extName)
}
props := &Properties{}
props.Parse(desc.Tag)
typ := reflect.TypeOf(desc.ExtensionType)
if err := p.checkForColon(props, typ); err != nil {
return err
}
rep := desc.repeated()
// Read the extension structure, and set it in
// the value we're constructing.
var ext reflect.Value
if !rep {
ext = reflect.New(typ).Elem()
} else {
ext = reflect.New(typ.Elem()).Elem()
}
if err := p.readAny(ext, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
ep := sv.Addr().Interface().(Message)
if !rep {
SetExtension(ep, desc, ext.Interface())
} else {
old, err := GetExtension(ep, desc)
var sl reflect.Value
if err == nil {
sl = reflect.ValueOf(old) // existing slice
} else {
sl = reflect.MakeSlice(typ, 0, 1)
}
sl = reflect.Append(sl, ext)
SetExtension(ep, desc, sl.Interface())
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
continue
}
// This is a normal, non-extension field.
name := tok.value
var dst reflect.Value
fi, props, ok := structFieldByName(sprops, name)
if ok {
dst = sv.Field(fi)
} else if oop, ok := sprops.OneofTypes[name]; ok {
// It is a oneof.
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
field := sv.Field(oop.Field)
if !field.IsNil() {
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
}
field.Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
}
if dst.Kind() == reflect.Map {
// Consume any colon.
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Construct the map if it doesn't already exist.
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
key := reflect.New(dst.Type().Key()).Elem()
val := reflect.New(dst.Type().Elem()).Elem()
// The map entry should be this sequence of tokens:
// < key : KEY value : VALUE >
// However, implementations may omit key or value, and technically
// we should support them in any order. See b/28924776 for a time
// this went wrong.
tok := p.next()
var terminator string
switch tok.value {
case "<":
terminator = ">"
case "{":
terminator = "}"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
for {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == terminator {
break
}
switch tok.value {
case "key":
if err := p.consumeToken(":"); err != nil {
return err
}
if err := p.readAny(key, props.mkeyprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
case "value":
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
return err
}
if err := p.readAny(val, props.mvalprop); err != nil {
return err
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
default:
p.back()
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
}
}
dst.SetMapIndex(key, val)
continue
}
// Check that it's not already set if it's not a repeated field.
if !props.Repeated && fieldSet[name] {
return p.errorf("non-repeated field %q was repeated", name)
}
if err := p.checkForColon(props, dst.Type()); err != nil {
return err
}
// Parse into the field.
fieldSet[name] = true
if err := p.readAny(dst, props); err != nil {
if _, ok := err.(*RequiredNotSetError); !ok {
return err
}
reqFieldErr = err
}
if props.Required {
reqCount--
}
if err := p.consumeOptionalSeparator(); err != nil {
return err
}
}
if reqCount > 0 {
return p.missingRequiredFieldError(sv)
}
return reqFieldErr
}
// consumeExtName consumes extension name or expanded Any type URL and the
// following ']'. It returns the name or URL consumed.
func (p *textParser) consumeExtName() (string, error) {
tok := p.next()
if tok.err != nil {
return "", tok.err
}
// If extension name or type url is quoted, it's a single token.
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
if err != nil {
return "", err
}
return name, p.consumeToken("]")
}
// Consume everything up to "]"
var parts []string
for tok.value != "]" {
parts = append(parts, tok.value)
tok = p.next()
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
if p.done && tok.value != "]" {
return "", p.errorf("unclosed type_url or extension name")
}
}
return strings.Join(parts, ""), nil
}
// consumeOptionalSeparator consumes an optional semicolon or comma.
// It is used in readStruct to provide backward compatibility.
func (p *textParser) consumeOptionalSeparator() error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value != ";" && tok.value != "," {
p.back()
}
return nil
}
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "" {
return p.errorf("unexpected EOF")
}
switch fv := v; fv.Kind() {
case reflect.Slice:
at := v.Type()
if at.Elem().Kind() == reflect.Uint8 {
// Special case for []byte
if tok.value[0] != '"' && tok.value[0] != '\'' {
// Deliberately written out here, as the error after
// this switch statement would write "invalid []byte: ...",
// which is not as user-friendly.
return p.errorf("invalid string: %v", tok.value)
}
bytes := []byte(tok.unquoted)
fv.Set(reflect.ValueOf(bytes))
return nil
}
// Repeated field.
if tok.value == "[" {
// Repeated field with list notation, like [1,2,3].
for {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
err := p.readAny(fv.Index(fv.Len()-1), props)
if err != nil {
return err
}
tok := p.next()
if tok.err != nil {
return tok.err
}
if tok.value == "]" {
break
}
if tok.value != "," {
return p.errorf("Expected ']' or ',' found %q", tok.value)
}
}
return nil
}
// One value of the repeated field.
p.back()
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
// true/1/t/True or false/f/0/False.
switch tok.value {
case "true", "1", "t", "True":
fv.SetBool(true)
return nil
case "false", "0", "f", "False":
fv.SetBool(false)
return nil
}
case reflect.Float32, reflect.Float64:
v := tok.value
// Ignore 'f' for compatibility with output generated by C++, but don't
// remove 'f' when the value is "-inf" or "inf".
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
v = v[:len(v)-1]
}
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
fv.SetFloat(f)
return nil
}
case reflect.Int32:
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
fv.SetInt(x)
return nil
}
if len(props.Enum) == 0 {
break
}
m, ok := enumValueMaps[props.Enum]
if !ok {
break
}
x, ok := m[tok.value]
if !ok {
break
}
fv.SetInt(int64(x))
return nil
case reflect.Int64:
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
fv.SetInt(x)
return nil
}
case reflect.Ptr:
// A basic field (indirected through pointer), or a repeated message/group
p.back()
fv.Set(reflect.New(fv.Type().Elem()))
return p.readAny(fv.Elem(), props)
case reflect.String:
if tok.value[0] == '"' || tok.value[0] == '\'' {
fv.SetString(tok.unquoted)
return nil
}
case reflect.Struct:
var terminator string
switch tok.value {
case "{":
terminator = "}"
case "<":
terminator = ">"
default:
return p.errorf("expected '{' or '<', found %q", tok.value)
}
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
return nil
}
case reflect.Uint64:
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
fv.SetUint(x)
return nil
}
}
return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
return um.UnmarshalText([]byte(s))
}
pb.Reset()
v := reflect.ValueOf(pb)
return newTextParser(s).readStruct(v.Elem(), "")
}
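
A matching round-trip sketch for the parser above, reusing the hypothetical pb.Greeting placeholder (assumed here to have a string field text and a repeated int32 field counts); only the exported proto functions come from these files.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/greetingpb" // hypothetical generated package
)

func main() {
	// Hand-written text input; the repeated field uses the [1, 2, 3] list
	// notation accepted by readAny, whereas the marshaler itself writes one
	// "counts: N" entry per value.
	in := `text: "hello" counts: [1, 2, 3]`

	msg := new(pb.Greeting)
	if err := proto.UnmarshalText(in, msg); err != nil {
		panic(err)
	}

	// Round-trip back to the canonical multi-line text form.
	fmt.Print(proto.MarshalTextString(msg))
}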

File diff suppressed because it is too large

View File

@ -1,872 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Author: kenton@google.com (Kenton Varda)
// Based on original Protocol Buffers design by
// Sanjay Ghemawat, Jeff Dean, and others.
//
// The messages in this file describe the definitions found in .proto files.
// A valid .proto file can be translated directly to a FileDescriptorProto
// without any other information (e.g. without reading its imports).
syntax = "proto2";
package google.protobuf;
option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
option java_package = "com.google.protobuf";
option java_outer_classname = "DescriptorProtos";
option csharp_namespace = "Google.Protobuf.Reflection";
option objc_class_prefix = "GPB";
option cc_enable_arenas = true;
// descriptor.proto must be optimized for speed because reflection-based
// algorithms don't work during bootstrapping.
option optimize_for = SPEED;
// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
message FileDescriptorSet {
repeated FileDescriptorProto file = 1;
}
// Describes a complete .proto file.
message FileDescriptorProto {
optional string name = 1; // file name, relative to root of source tree
optional string package = 2; // e.g. "foo", "foo.bar", etc.
// Names of files imported by this file.
repeated string dependency = 3;
// Indexes of the public imported files in the dependency list above.
repeated int32 public_dependency = 10;
// Indexes of the weak imported files in the dependency list.
// For Google-internal migration only. Do not use.
repeated int32 weak_dependency = 11;
// All top-level definitions in this file.
repeated DescriptorProto message_type = 4;
repeated EnumDescriptorProto enum_type = 5;
repeated ServiceDescriptorProto service = 6;
repeated FieldDescriptorProto extension = 7;
optional FileOptions options = 8;
// This field contains optional information about the original source code.
// You may safely remove this entire field without harming runtime
// functionality of the descriptors -- the information is needed only by
// development tools.
optional SourceCodeInfo source_code_info = 9;
// The syntax of the proto file.
// The supported values are "proto2" and "proto3".
optional string syntax = 12;
}
// Describes a message type.
message DescriptorProto {
optional string name = 1;
repeated FieldDescriptorProto field = 2;
repeated FieldDescriptorProto extension = 6;
repeated DescriptorProto nested_type = 3;
repeated EnumDescriptorProto enum_type = 4;
message ExtensionRange {
optional int32 start = 1;
optional int32 end = 2;
optional ExtensionRangeOptions options = 3;
}
repeated ExtensionRange extension_range = 5;
repeated OneofDescriptorProto oneof_decl = 8;
optional MessageOptions options = 7;
// Range of reserved tag numbers. Reserved tag numbers may not be used by
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
message ReservedRange {
optional int32 start = 1; // Inclusive.
optional int32 end = 2; // Exclusive.
}
repeated ReservedRange reserved_range = 9;
// Reserved field names, which may not be used by fields in the same message.
// A given name may only be reserved once.
repeated string reserved_name = 10;
}
message ExtensionRangeOptions {
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
// Describes a field within a message.
message FieldDescriptorProto {
enum Type {
// 0 is reserved for errors.
// Order is weird for historical reasons.
TYPE_DOUBLE = 1;
TYPE_FLOAT = 2;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
// negative values are likely.
TYPE_INT64 = 3;
TYPE_UINT64 = 4;
// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
// negative values are likely.
TYPE_INT32 = 5;
TYPE_FIXED64 = 6;
TYPE_FIXED32 = 7;
TYPE_BOOL = 8;
TYPE_STRING = 9;
// Tag-delimited aggregate.
// Group type is deprecated and not supported in proto3. However, Proto3
// implementations should still be able to parse the group wire format and
// treat group fields as unknown fields.
TYPE_GROUP = 10;
TYPE_MESSAGE = 11; // Length-delimited aggregate.
// New in version 2.
TYPE_BYTES = 12;
TYPE_UINT32 = 13;
TYPE_ENUM = 14;
TYPE_SFIXED32 = 15;
TYPE_SFIXED64 = 16;
TYPE_SINT32 = 17; // Uses ZigZag encoding.
TYPE_SINT64 = 18; // Uses ZigZag encoding.
};
enum Label {
// 0 is reserved for errors
LABEL_OPTIONAL = 1;
LABEL_REQUIRED = 2;
LABEL_REPEATED = 3;
};
optional string name = 1;
optional int32 number = 3;
optional Label label = 4;
// If type_name is set, this need not be set. If both this and type_name
// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
optional Type type = 5;
// For message and enum types, this is the name of the type. If the name
// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
// rules are used to find the type (i.e. first the nested types within this
// message are searched, then within the parent, on up to the root
// namespace).
optional string type_name = 6;
// For extensions, this is the name of the type being extended. It is
// resolved in the same manner as type_name.
optional string extendee = 2;
// For numeric types, contains the original text representation of the value.
// For booleans, "true" or "false".
// For strings, contains the default text contents (not escaped in any way).
// For bytes, contains the C escaped value. All bytes >= 128 are escaped.
// TODO(kenton): Base-64 encode?
optional string default_value = 7;
// If set, gives the index of a oneof in the containing type's oneof_decl
// list. This field is a member of that oneof.
optional int32 oneof_index = 9;
// JSON name of this field. The value is set by protocol compiler. If the
// user has set a "json_name" option on this field, that option's value
// will be used. Otherwise, it's deduced from the field's name by converting
// it to camelCase.
optional string json_name = 10;
optional FieldOptions options = 8;
}
// Describes a oneof.
message OneofDescriptorProto {
optional string name = 1;
optional OneofOptions options = 2;
}
// Describes an enum type.
message EnumDescriptorProto {
optional string name = 1;
repeated EnumValueDescriptorProto value = 2;
optional EnumOptions options = 3;
// Range of reserved numeric values. Reserved values may not be used by
// entries in the same enum. Reserved ranges may not overlap.
//
// Note that this is distinct from DescriptorProto.ReservedRange in that it
// is inclusive such that it can appropriately represent the entire int32
// domain.
message EnumReservedRange {
optional int32 start = 1; // Inclusive.
optional int32 end = 2; // Inclusive.
}
// Range of reserved numeric values. Reserved numeric values may not be used
// by enum values in the same enum declaration. Reserved ranges may not
// overlap.
repeated EnumReservedRange reserved_range = 4;
// Reserved enum value names, which may not be reused. A given name may only
// be reserved once.
repeated string reserved_name = 5;
}
// Describes a value within an enum.
message EnumValueDescriptorProto {
optional string name = 1;
optional int32 number = 2;
optional EnumValueOptions options = 3;
}
// Describes a service.
message ServiceDescriptorProto {
optional string name = 1;
repeated MethodDescriptorProto method = 2;
optional ServiceOptions options = 3;
}
// Describes a method of a service.
message MethodDescriptorProto {
optional string name = 1;
// Input and output type names. These are resolved in the same way as
// FieldDescriptorProto.type_name, but must refer to a message type.
optional string input_type = 2;
optional string output_type = 3;
optional MethodOptions options = 4;
// Identifies if client streams multiple client messages
optional bool client_streaming = 5 [default=false];
// Identifies if server streams multiple server messages
optional bool server_streaming = 6 [default=false];
}
// ===================================================================
// Options
// Each of the definitions above may have "options" attached. These are
// just annotations which may cause code to be generated slightly differently
// or may contain hints for code that manipulates protocol messages.
//
// Clients may define custom options as extensions of the *Options messages.
// These extensions may not yet be known at parsing time, so the parser cannot
// store the values in them. Instead it stores them in a field in the *Options
// message called uninterpreted_option. This field must have the same name
// across all *Options messages. We then use this field to populate the
// extensions when we build a descriptor, at which point all protos have been
// parsed and so all extensions are known.
//
// Extension numbers for custom options may be chosen as follows:
// * For options which will only be used within a single application or
// organization, or for experimental options, use field numbers 50000
// through 99999. It is up to you to ensure that you do not use the
// same number for multiple options.
// * For options which will be published and used publicly by multiple
// independent entities, e-mail protobuf-global-extension-registry@google.com
// to reserve extension numbers. Simply provide your project name (e.g.
// Objective-C plugin) and your project website (if available) -- there's no
// need to explain how you intend to use them. Usually you only need one
// extension number. You can declare multiple options with only one extension
// number by putting them in a sub-message. See the Custom Options section of
// the docs for examples:
// https://developers.google.com/protocol-buffers/docs/proto#options
// If this turns out to be popular, a web service will be set up
// to automatically assign option numbers.
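//
// As an illustrative sketch (not part of this file), a custom option in the
// experimental range is declared by extending one of the *Options messages
// and is then set with the parenthesized option syntax:
//
//   import "google/protobuf/descriptor.proto";
//
//   extend google.protobuf.FileOptions {
//     optional string my_file_option = 50001;
//   }
//
//   option (my_file_option) = "example";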
message FileOptions {
// Sets the Java package where classes generated from this .proto will be
// placed. By default, the proto package is used, but this is often
// inappropriate because proto packages do not normally start with backwards
// domain names.
optional string java_package = 1;
// If set, all the classes from the .proto file are wrapped in a single
// outer class with the given name. This applies to both Proto1
// (equivalent to the old "--one_java_file" option) and Proto2 (where
// a .proto always translates to a single class, but you may want to
// explicitly choose the class name).
optional string java_outer_classname = 8;
// If set true, then the Java code generator will generate a separate .java
// file for each top-level message, enum, and service defined in the .proto
// file. Thus, these types will *not* be nested inside the outer class
// named by java_outer_classname. However, the outer class will still be
// generated to contain the file's getDescriptor() method as well as any
// top-level extensions defined in the file.
optional bool java_multiple_files = 10 [default=false];
// This option does nothing.
optional bool java_generate_equals_and_hash = 20 [deprecated=true];
// If set true, then the Java2 code generator will generate code that
// throws an exception whenever an attempt is made to assign a non-UTF-8
// byte sequence to a string field.
// Message reflection will do the same.
// However, an extension field still accepts non-UTF-8 byte sequences.
// This option has no effect when used with the lite runtime.
optional bool java_string_check_utf8 = 27 [default=false];
// Generated classes can be optimized for speed or code size.
enum OptimizeMode {
SPEED = 1; // Generate complete code for parsing, serialization,
// etc.
CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
}
optional OptimizeMode optimize_for = 9 [default=SPEED];
// Sets the Go package where structs generated from this .proto will be
// placed. If omitted, the Go package will be derived from the following:
// - The basename of the package import path, if provided.
// - Otherwise, the package statement in the .proto file, if present.
// - Otherwise, the basename of the .proto file, without extension.
optional string go_package = 11;
// Should generic services be generated in each language? "Generic" services
// are not specific to any particular RPC system. They are generated by the
// main code generators in each language (without additional plugins).
// Generic services were the only kind of service generation supported by
// early versions of google.protobuf.
//
// Generic services are now considered deprecated in favor of using plugins
// that generate code specific to your particular RPC system. Therefore,
// these default to false. Old code which depends on generic services should
// explicitly set them to true.
optional bool cc_generic_services = 16 [default=false];
optional bool java_generic_services = 17 [default=false];
optional bool py_generic_services = 18 [default=false];
optional bool php_generic_services = 42 [default=false];
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for everything in the file, or it will be completely ignored; in the very
// least, this is a formalization for deprecating files.
optional bool deprecated = 23 [default=false];
// Enables the use of arenas for the proto messages in this file. This applies
// only to generated classes for C++.
optional bool cc_enable_arenas = 31 [default=false];
// Sets the objective c class prefix which is prepended to all objective c
// generated classes from this .proto. There is no default.
optional string objc_class_prefix = 36;
// Namespace for generated classes; defaults to the package.
optional string csharp_namespace = 37;
// By default Swift generators will take the proto package and CamelCase it
// replacing '.' with underscore and use that to prefix the types/symbols
// defined. When this option is provided, they will use this value instead
// to prefix the types/symbols defined.
optional string swift_prefix = 39;
// Sets the php class prefix which is prepended to all php generated classes
// from this .proto. Default is empty.
optional string php_class_prefix = 40;
// Use this option to change the namespace of php generated classes. Default
// is empty. When this option is empty, the package name will be used for
// determining the namespace.
optional string php_namespace = 41;
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message.
// See the documentation for the "Options" section above.
extensions 1000 to max;
reserved 38;
}
message MessageOptions {
// Set true to use the old proto1 MessageSet wire format for extensions.
// This is provided for backwards-compatibility with the MessageSet wire
// format. You should not use this for any other reason: It's less
// efficient, has fewer features, and is more complicated.
//
// The message must be defined exactly as follows:
// message Foo {
// option message_set_wire_format = true;
// extensions 4 to max;
// }
// Note that the message cannot have any defined fields; MessageSets only
// have extensions.
//
// All extensions of your type must be singular messages; e.g. they cannot
// be int32s, enums, or repeated messages.
//
// Because this is an option, the above two restrictions are not enforced by
// the protocol compiler.
optional bool message_set_wire_format = 1 [default=false];
// Disables the generation of the standard "descriptor()" accessor, which can
// conflict with a field of the same name. This is meant to make migration
// from proto1 easier; new code should avoid fields named "descriptor".
optional bool no_standard_descriptor_accessor = 2 [default=false];
// Is this message deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the message, or it will be completely ignored; in the very least,
// this is a formalization for deprecating messages.
optional bool deprecated = 3 [default=false];
// Whether the message is an automatically generated map entry type for the
// maps field.
//
// For maps fields:
// map<KeyType, ValueType> map_field = 1;
// The parsed descriptor looks like:
// message MapFieldEntry {
// option map_entry = true;
// optional KeyType key = 1;
// optional ValueType value = 2;
// }
// repeated MapFieldEntry map_field = 1;
//
// Implementations may choose not to generate the map_entry=true message, but
// use a native map in the target language to hold the keys and values.
// The reflection APIs in such implementations still need to work as
// if the field is a repeated message field.
//
// NOTE: Do not set the option in .proto files. Always use the maps syntax
// instead. The option should only be implicitly set by the proto compiler
// parser.
optional bool map_entry = 7;
reserved 8; // javalite_serializable
reserved 9; // javanano_as_lite
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message FieldOptions {
// The ctype option instructs the C++ code generator to use a different
// representation of the field than it normally would. See the specific
// options below. This option is not yet implemented in the open source
// release -- sorry, we'll try to include it in a future version!
optional CType ctype = 1 [default = STRING];
enum CType {
// Default mode.
STRING = 0;
CORD = 1;
STRING_PIECE = 2;
}
// The packed option can be enabled for repeated primitive fields to enable
// a more efficient representation on the wire. Rather than repeatedly
// writing the tag and type for each element, the entire array is encoded as
// a single length-delimited blob. In proto3, only explicit setting it to
// false will avoid using packed encoding.
optional bool packed = 2;
// The jstype option determines the JavaScript type used for values of the
// field. The option is permitted only for 64 bit integral and fixed types
// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
// is represented as a JavaScript string, which avoids loss of precision that
// can happen when a large value is converted to a floating point JavaScript
// number.
// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
// use the JavaScript "number" type. The behavior of the default option
// JS_NORMAL is implementation dependent.
//
// This option is an enum to permit additional types to be added, e.g.
// goog.math.Integer.
optional JSType jstype = 6 [default = JS_NORMAL];
enum JSType {
// Use the default type.
JS_NORMAL = 0;
// Use JavaScript strings.
JS_STRING = 1;
// Use JavaScript numbers.
JS_NUMBER = 2;
}
// Should this field be parsed lazily? Lazy applies only to message-type
// fields. It means that when the outer message is initially parsed, the
// inner message's contents will not be parsed but instead stored in encoded
// form. The inner message will actually be parsed when it is first accessed.
//
// This is only a hint. Implementations are free to choose whether to use
// eager or lazy parsing regardless of the value of this option. However,
// setting this option true suggests that the protocol author believes that
// using lazy parsing on this field is worth the additional bookkeeping
// overhead typically needed to implement it.
//
// This option does not affect the public interface of any generated code;
// all method signatures remain the same. Furthermore, thread-safety of the
// interface is not affected by this option; const methods remain safe to
// call from multiple threads concurrently, while non-const methods continue
// to require exclusive access.
//
//
// Note that implementations may choose not to check required fields within
// a lazy sub-message. That is, calling IsInitialized() on the outer message
// may return true even if the inner message has missing required fields.
// This is necessary because otherwise the inner message would have to be
// parsed in order to perform the check, defeating the purpose of lazy
// parsing. An implementation which chooses not to check required fields
// must be consistent about it. That is, for any particular sub-message, the
// implementation must either *always* check its required fields, or *never*
// check its required fields, regardless of whether or not the message has
// been parsed.
optional bool lazy = 5 [default=false];
// Is this field deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for accessors, or it will be completely ignored; in the very least, this
// is a formalization for deprecating fields.
optional bool deprecated = 3 [default=false];
// For Google-internal migration only. Do not use.
optional bool weak = 10 [default=false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
reserved 4; // removed jtype
}
message OneofOptions {
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message EnumOptions {
// Set this option to true to allow mapping different tag names to the same
// value.
optional bool allow_alias = 2;
// Is this enum deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum, or it will be completely ignored; in the very least, this
// is a formalization for deprecating enums.
optional bool deprecated = 3 [default=false];
reserved 5; // javanano_as_lite
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message EnumValueOptions {
// Is this enum value deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum value, or it will be completely ignored; in the very least,
// this is a formalization for deprecating enum values.
optional bool deprecated = 1 [default=false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message ServiceOptions {
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
// framework. We apologize for hoarding these numbers to ourselves, but
// we were already using them long before we decided to release Protocol
// Buffers.
// Is this service deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the service, or it will be completely ignored; in the very least,
// this is a formalization for deprecating services.
optional bool deprecated = 33 [default=false];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
message MethodOptions {
// Note: Field numbers 1 through 32 are reserved for Google's internal RPC
// framework. We apologize for hoarding these numbers to ourselves, but
// we were already using them long before we decided to release Protocol
// Buffers.
// Is this method deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the method, or it will be completely ignored; in the very least,
// this is a formalization for deprecating methods.
optional bool deprecated = 33 [default=false];
// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
// or neither? HTTP based RPC implementation may choose GET verb for safe
// methods, and PUT verb for idempotent methods instead of the default POST.
enum IdempotencyLevel {
IDEMPOTENCY_UNKNOWN = 0;
NO_SIDE_EFFECTS = 1; // implies idempotent
IDEMPOTENT = 2; // idempotent, but may have side effects
}
optional IdempotencyLevel idempotency_level =
34 [default=IDEMPOTENCY_UNKNOWN];
// The parser stores options it doesn't recognize here. See above.
repeated UninterpretedOption uninterpreted_option = 999;
// Clients can define custom options in extensions of this message. See above.
extensions 1000 to max;
}
// A message representing an option the parser does not recognize. This only
// appears in options protos created by the compiler::Parser class.
// DescriptorPool resolves these when building Descriptor objects. Therefore,
// options protos in descriptor objects (e.g. returned by Descriptor::options(),
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
// in them.
message UninterpretedOption {
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
// "foo.(bar.baz).qux".
message NamePart {
required string name_part = 1;
required bool is_extension = 2;
}
repeated NamePart name = 2;
// The value of the uninterpreted option, in whatever type the tokenizer
// identified it as during parsing. Exactly one of these should be set.
optional string identifier_value = 3;
optional uint64 positive_int_value = 4;
optional int64 negative_int_value = 5;
optional double double_value = 6;
optional bytes string_value = 7;
optional string aggregate_value = 8;
}
// ===================================================================
// Optional source code info
// Encapsulates information about the original source file from which a
// FileDescriptorProto was generated.
message SourceCodeInfo {
// A Location identifies a piece of source code in a .proto file which
// corresponds to a particular definition. This information is intended
// to be useful to IDEs, code indexers, documentation generators, and similar
// tools.
//
// For example, say we have a file like:
// message Foo {
// optional string foo = 1;
// }
// Let's look at just the field definition:
// optional string foo = 1;
// ^ ^^ ^^ ^ ^^^
// a bc de f ghi
// We have the following locations:
// span path represents
// [a,i) [ 4, 0, 2, 0 ] The whole field definition.
// [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
// [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
// [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
// [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
//
// Notes:
// - A location may refer to a repeated field itself (i.e. not to any
// particular index within it). This is used whenever a set of elements are
// logically enclosed in a single code segment. For example, an entire
// extend block (possibly containing multiple extension definitions) will
// have an outer location whose path refers to the "extensions" repeated
// field without an index.
// - Multiple locations may have the same path. This happens when a single
// logical declaration is spread out across multiple places. The most
// obvious example is the "extend" block again -- there may be multiple
// extend blocks in the same scope, each of which will have the same path.
// - A location's span is not always a subset of its parent's span. For
// example, the "extendee" of an extension declaration appears at the
// beginning of the "extend" block and is shared by all extensions within
// the block.
// - Just because a location's span is a subset of some other location's span
// does not mean that it is a descendant. For example, a "group" defines
// both a type and a field in a single declaration. Thus, the locations
// corresponding to the type and field and their components will overlap.
// - Code which tries to interpret locations should probably be designed to
// ignore those that it doesn't understand, as more types of locations could
// be recorded in the future.
repeated Location location = 1;
message Location {
// Identifies which part of the FileDescriptorProto was defined at this
// location.
//
// Each element is a field number or an index. They form a path from
// the root FileDescriptorProto to the place where the definition appears. For
// example, this path:
// [ 4, 3, 2, 7, 1 ]
// refers to:
// file.message_type(3) // 4, 3
// .field(7) // 2, 7
// .name() // 1
// This is because FileDescriptorProto.message_type has field number 4:
// repeated DescriptorProto message_type = 4;
// and DescriptorProto.field has field number 2:
// repeated FieldDescriptorProto field = 2;
// and FieldDescriptorProto.name has field number 1:
// optional string name = 1;
//
// Thus, the above path gives the location of a field name. If we removed
// the last element:
// [ 4, 3, 2, 7 ]
// this path refers to the whole field declaration (from the beginning
// of the label to the terminating semicolon).
repeated int32 path = 1 [packed=true];
// Always has exactly three or four elements: start line, start column,
// end line (optional, otherwise assumed same as start line), end column.
// These are packed into a single field for efficiency. Note that line
// and column numbers are zero-based -- typically you will want to add
// 1 to each before displaying to a user.
repeated int32 span = 2 [packed=true];
// If this SourceCodeInfo represents a complete declaration, these are any
// comments appearing before and after the declaration which appear to be
// attached to the declaration.
//
// A series of line comments appearing on consecutive lines, with no other
// tokens appearing on those lines, will be treated as a single comment.
//
// leading_detached_comments will keep paragraphs of comments that appear
// before (but not connected to) the current element. Each paragraph,
// separated by empty lines, will be one comment element in the repeated
// field.
//
// Only the comment content is provided; comment markers (e.g. //) are
// stripped out. For block comments, leading whitespace and an asterisk
// will be stripped from the beginning of each line other than the first.
// Newlines are included in the output.
//
// Examples:
//
// optional int32 foo = 1; // Comment attached to foo.
// // Comment attached to bar.
// optional int32 bar = 2;
//
// optional string baz = 3;
// // Comment attached to baz.
// // Another line attached to baz.
//
// // Comment attached to qux.
// //
// // Another line attached to qux.
// optional double qux = 4;
//
// // Detached comment for corge. This is not leading or trailing comments
// // to qux or corge because there are blank lines separating it from
// // both.
//
// // Detached comment for corge paragraph 2.
//
// optional string corge = 5;
// /* Block comment attached
// * to corge. Leading asterisks
// * will be removed. */
// /* Block comment attached to
// * grault. */
// optional int32 grault = 6;
//
// // ignored detached comments.
optional string leading_comments = 3;
optional string trailing_comments = 4;
repeated string leading_detached_comments = 6;
}
}
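To make the path arithmetic above concrete, a small hedged Go sketch (not part of the removed file; resolveFieldName is a hypothetical helper) that builds a tiny FileDescriptorProto with the vendored descriptor package and resolves a path of the form [4, m, 2, f, 1] to a field name:
package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
    descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// resolveFieldName follows a SourceCodeInfo path [4, m, 2, f, 1]
// (message_type -> field -> name) by hand.
func resolveFieldName(file *descriptor.FileDescriptorProto, msgIdx, fieldIdx int) (string, error) {
    if msgIdx >= len(file.MessageType) {
        return "", fmt.Errorf("no message at index %d", msgIdx)
    }
    msg := file.MessageType[msgIdx]
    if fieldIdx >= len(msg.Field) {
        return "", fmt.Errorf("no field at index %d", fieldIdx)
    }
    return msg.Field[fieldIdx].GetName(), nil
}

func main() {
    file := &descriptor.FileDescriptorProto{
        Name: proto.String("example.proto"),
        MessageType: []*descriptor.DescriptorProto{{
            Name: proto.String("Foo"),
            Field: []*descriptor.FieldDescriptorProto{{
                Name:   proto.String("foo"),
                Number: proto.Int32(1),
            }},
        }},
    }
    // Path [ 4, 0, 2, 0, 1 ] -> first message, first field, its name.
    name, err := resolveFieldName(file, 0, 0)
    fmt.Println(name, err) // foo <nil>
}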
// Describes the relationship between generated code and its original source
// file. A GeneratedCodeInfo message is associated with only one generated
// source file, but may contain references to different source .proto files.
message GeneratedCodeInfo {
// An Annotation connects some span of text in generated code to an element
// of its generating .proto file.
repeated Annotation annotation = 1;
message Annotation {
// Identifies the element in the original source .proto file. This field
// is formatted the same as SourceCodeInfo.Location.path.
repeated int32 path = 1 [packed=true];
// Identifies the filesystem path to the original source .proto.
optional string source_file = 2;
// Identifies the starting offset in bytes in the generated code
// that relates to the identified object.
optional int32 begin = 3;
// Identifies the ending offset in bytes in the generated code that
// relates to the identified offset. The end offset should be one past
// the last relevant byte (so the length of the text = end - begin).
optional int32 end = 4;
}
}
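Since Annotation.begin and Annotation.end are byte offsets into the generated file, with end one past the last relevant byte, a minimal standalone sketch (annotatedSpan and the offsets are hypothetical, not part of the removed file) of extracting the annotated text could look like:
package main

import "fmt"

// annotatedSpan returns the generated text covered by an annotation,
// where begin/end are byte offsets and end is exclusive.
func annotatedSpan(generated string, begin, end int) string {
    if begin < 0 || end > len(generated) || begin > end {
        return ""
    }
    return generated[begin:end]
}

func main() {
    generated := "type Foo struct { Bar string }"
    // Hypothetical annotation covering the identifier "Foo" (bytes 5..8).
    fmt.Println(annotatedSpan(generated, 5, 8)) // Foo
}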


@ -1,139 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.
import (
"fmt"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/any"
)
const googleApis = "type.googleapis.com/"
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return any.TypeUrl[slash+1:], nil
}
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*any.Any, error) {
value, err := proto.Marshal(pb)
if err != nil {
return nil, err
}
return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
proto.Message
}
// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if the corresponding message
// type isn't linked in.
func Empty(any *any.Any) (proto.Message, error) {
aname, err := AnyMessageName(any)
if err != nil {
return nil, err
}
t := proto.MessageType(aname)
if t == nil {
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
}
return reflect.New(t.Elem()).Interface().(proto.Message), nil
}
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if the type of
// the contents of the Any message does not match the type of the pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *any.Any, pb proto.Message) error {
if d, ok := pb.(*DynamicAny); ok {
if d.Message == nil {
var err error
d.Message, err = Empty(any)
if err != nil {
return err
}
}
return UnmarshalAny(any, d.Message)
}
aname, err := AnyMessageName(any)
if err != nil {
return err
}
mname := proto.MessageName(pb)
if aname != mname {
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
}
return proto.Unmarshal(any.Value, pb)
}
// Is returns true if any value contains a given message type.
func Is(any *any.Any, pb proto.Message) bool {
aname, err := AnyMessageName(any)
if err != nil {
return false
}
return aname == proto.MessageName(pb)
}
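Taken together, MarshalAny, Is and UnmarshalAny give the usual pack/check/unpack round trip. A minimal usage sketch (not part of the removed file), using the vendored Duration message as the payload:
package main

import (
    "fmt"

    "github.com/golang/protobuf/ptypes"
    durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
    // Pack a message into an Any; the type URL becomes
    // "type.googleapis.com/google.protobuf.Duration".
    src := &durpb.Duration{Seconds: 90}
    a, err := ptypes.MarshalAny(src)
    if err != nil {
        panic(err)
    }
    fmt.Println(a.TypeUrl)

    // Check the packed type, then unpack it again.
    dst := &durpb.Duration{}
    if ptypes.Is(a, dst) {
        if err := ptypes.UnmarshalAny(a, dst); err != nil {
            panic(err)
        }
    }
    fmt.Println(dst.Seconds) // 90
}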


@ -1,191 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
package any // import "github.com/golang/protobuf/ptypes/any"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
type Any struct {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Any) Reset() { *m = Any{} }
func (m *Any) String() string { return proto.CompactTextString(m) }
func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) {
return fileDescriptor_any_744b9ca530f228db, []int{0}
}
func (*Any) XXX_WellKnownType() string { return "Any" }
func (m *Any) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Any.Unmarshal(m, b)
}
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
}
func (dst *Any) XXX_Merge(src proto.Message) {
xxx_messageInfo_Any.Merge(dst, src)
}
func (m *Any) XXX_Size() int {
return xxx_messageInfo_Any.Size(m)
}
func (m *Any) XXX_DiscardUnknown() {
xxx_messageInfo_Any.DiscardUnknown(m)
}
var xxx_messageInfo_Any proto.InternalMessageInfo
func (m *Any) GetTypeUrl() string {
if m != nil {
return m.TypeUrl
}
return ""
}
func (m *Any) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
var fileDescriptor_any_744b9ca530f228db = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
}


@ -1,149 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
// Foo foo = ...;
// Any any;
// any.PackFrom(foo);
// ...
// if (any.UnpackTo(&foo)) {
// ...
// }
//
// Example 2: Pack and unpack a message in Java.
//
// Foo foo = ...;
// Any any = Any.pack(foo);
// ...
// if (any.is(Foo.class)) {
// foo = any.unpack(Foo.class);
// }
//
// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
// any.Pack(foo)
// ...
// if any.Is(Foo.DESCRIPTOR):
// any.Unpack(foo)
// ...
//
// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
// ...
// foo := &pb.Foo{}
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
// ...
// }
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
// package google.profile;
// message Person {
// string first_name = 1;
// string last_name = 2;
// }
//
// {
// "@type": "type.googleapis.com/google.profile.Person",
// "firstName": <string>,
// "lastName": <string>
// }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
// {
// "@type": "type.googleapis.com/google.protobuf.Duration",
// "value": "1.212s"
// }
//
message Any {
// A URL/resource name whose content describes the type of the
// serialized protocol buffer message.
//
// For URLs which use the scheme `http`, `https`, or no scheme, the
// following restrictions and interpretations apply:
//
// * If no scheme is provided, `https` is assumed.
// * The last segment of the URL's path must represent the fully
// qualified name of the type (as in `path/google.protobuf.Duration`).
// The name should be in a canonical form (e.g., leading "." is
// not accepted).
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
// value in binary format, or produce an error.
// * Applications are allowed to cache lookup results based on the
// URL, or have them precompiled into a binary to avoid any
// lookup. Therefore, binary compatibility needs to be preserved
// on changes to types. (Use versioned type names to manage
// breaking changes.)
//
// Schemes other than `http`, `https` (or the empty scheme) might be
// used with implementation specific semantics.
//
string type_url = 1;
// Must be a valid serialized protocol buffer of the above specified type.
bytes value = 2;
}


@ -1,35 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/*
Package ptypes contains code for interacting with well-known types.
*/
package ptypes


@ -1,102 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements conversions between google.protobuf.Duration
// and time.Duration.
import (
"errors"
"fmt"
"time"
durpb "github.com/golang/protobuf/ptypes/duration"
)
const (
// Range of a durpb.Duration in seconds, as specified in
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// validateDuration determines whether the durpb.Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid durpb.Duration
// may still be too large to fit into a time.Duration (the range of durpb.Duration
// is about 10,000 years, while the range of time.Duration is about 290 years).
func validateDuration(d *durpb.Duration) error {
if d == nil {
return errors.New("duration: nil Duration")
}
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", d)
}
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", d)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
}
return nil
}
// Duration converts a durpb.Duration to a time.Duration. Duration
// returns an error if the durpb.Duration is invalid or is too large to be
// represented in a time.Duration.
func Duration(p *durpb.Duration) (time.Duration, error) {
if err := validateDuration(p); err != nil {
return 0, err
}
d := time.Duration(p.Seconds) * time.Second
if int64(d/time.Second) != p.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
if p.Nanos != 0 {
d += time.Duration(p.Nanos)
if (d < 0) != (p.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durpb.Duration.
func DurationProto(d time.Duration) *durpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durpb.Duration{
Seconds: secs,
Nanos: int32(nanos),
}
}
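A short usage sketch (not part of the removed file) of the Duration/DurationProto round trip defined above, assuming a 90.5 second input:
package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/ptypes"
)

func main() {
    // time.Duration -> google.protobuf.Duration proto and back.
    p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
    fmt.Println(p.Seconds, p.Nanos) // 90 500000000

    d, err := ptypes.Duration(p)
    if err != nil {
        panic(err)
    }
    fmt.Println(d) // 1m30.5s
}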


@ -1,159 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
package duration // import "github.com/golang/protobuf/ptypes/duration"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Duration) Reset() { *m = Duration{} }
func (m *Duration) String() string { return proto.CompactTextString(m) }
func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) {
return fileDescriptor_duration_e7d612259e3f0613, []int{0}
}
func (*Duration) XXX_WellKnownType() string { return "Duration" }
func (m *Duration) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Duration.Unmarshal(m, b)
}
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
}
func (dst *Duration) XXX_Merge(src proto.Message) {
xxx_messageInfo_Duration.Merge(dst, src)
}
func (m *Duration) XXX_Size() int {
return xxx_messageInfo_Duration.Size(m)
}
func (m *Duration) XXX_DiscardUnknown() {
xxx_messageInfo_Duration.DiscardUnknown(m)
}
var xxx_messageInfo_Duration proto.InternalMessageInfo
func (m *Duration) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Duration) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
func init() {
proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
}
var fileDescriptor_duration_e7d612259e3f0613 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
}


@ -1,117 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// # Examples
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
// Timestamp end = ...;
// Duration duration = ...;
//
// duration.seconds = end.seconds - start.seconds;
// duration.nanos = end.nanos - start.nanos;
//
// if (duration.seconds < 0 && duration.nanos > 0) {
// duration.seconds += 1;
// duration.nanos -= 1000000000;
// } else if (duration.seconds > 0 && duration.nanos < 0) {
// duration.seconds -= 1;
// duration.nanos += 1000000000;
// }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
// Timestamp start = ...;
// Duration duration = ...;
// Timestamp end = ...;
//
// end.seconds = start.seconds + duration.seconds;
// end.nanos = start.nanos + duration.nanos;
//
// if (end.nanos < 0) {
// end.seconds -= 1;
// end.nanos += 1000000000;
// } else if (end.nanos >= 1000000000) {
// end.seconds += 1;
// end.nanos -= 1000000000;
// }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
// td = datetime.timedelta(days=3, minutes=10)
// duration = Duration()
// duration.FromTimedelta(td)
//
// # JSON Mapping
//
// In JSON format, the Duration type is encoded as a string rather than an
// object, where the string ends in the suffix "s" (indicating seconds) and
// is preceded by the number of seconds, with nanoseconds expressed as
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
// `seconds` field and a positive or negative `nanos` field. For durations
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
int32 nanos = 2;
}
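The sign rule above (seconds and nanos must agree in sign) is exactly what the normalization step in Example 1 enforces. A standalone Go sketch of that adjustment (normalize is a hypothetical helper, not part of the removed file):
package main

import "fmt"

// normalize applies the adjustment from Example 1 above so that seconds and
// nanos end up with the same sign and nanos stays within (-1e9, 1e9).
func normalize(seconds int64, nanos int32) (int64, int32) {
    if seconds < 0 && nanos > 0 {
        seconds++
        nanos -= 1000000000
    } else if seconds > 0 && nanos < 0 {
        seconds--
        nanos += 1000000000
    }
    return seconds, nanos
}

func main() {
    // A raw difference of -2s + 500ms normalizes to -1.5s:
    fmt.Println(normalize(-2, 500000000)) // -1 -500000000
}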


@ -1,440 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/struct.proto
package structpb // import "github.com/golang/protobuf/ptypes/struct"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
//
// The JSON representation for `NullValue` is JSON `null`.
type NullValue int32
const (
// Null value.
NullValue_NULL_VALUE NullValue = 0
)
var NullValue_name = map[int32]string{
0: "NULL_VALUE",
}
var NullValue_value = map[string]int32{
"NULL_VALUE": 0,
}
func (x NullValue) String() string {
return proto.EnumName(NullValue_name, int32(x))
}
func (NullValue) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
}
func (NullValue) XXX_WellKnownType() string { return "NullValue" }
// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
// scripting languages like JS a struct is represented as an
// object. The details of that representation are described together
// with the proto support for the language.
//
// The JSON representation for `Struct` is JSON object.
type Struct struct {
// Unordered map of dynamically typed values.
Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Struct) Reset() { *m = Struct{} }
func (m *Struct) String() string { return proto.CompactTextString(m) }
func (*Struct) ProtoMessage() {}
func (*Struct) Descriptor() ([]byte, []int) {
return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
}
func (*Struct) XXX_WellKnownType() string { return "Struct" }
func (m *Struct) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Struct.Unmarshal(m, b)
}
func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
}
func (dst *Struct) XXX_Merge(src proto.Message) {
xxx_messageInfo_Struct.Merge(dst, src)
}
func (m *Struct) XXX_Size() int {
return xxx_messageInfo_Struct.Size(m)
}
func (m *Struct) XXX_DiscardUnknown() {
xxx_messageInfo_Struct.DiscardUnknown(m)
}
var xxx_messageInfo_Struct proto.InternalMessageInfo
func (m *Struct) GetFields() map[string]*Value {
if m != nil {
return m.Fields
}
return nil
}
// `Value` represents a dynamically typed value which can be either
// null, a number, a string, a boolean, a recursive struct value, or a
// list of values. A producer of value is expected to set one of these
// variants; absence of any variant indicates an error.
//
// The JSON representation for `Value` is JSON value.
type Value struct {
// The kind of value.
//
// Types that are valid to be assigned to Kind:
// *Value_NullValue
// *Value_NumberValue
// *Value_StringValue
// *Value_BoolValue
// *Value_StructValue
// *Value_ListValue
Kind isValue_Kind `protobuf_oneof:"kind"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Value) Reset() { *m = Value{} }
func (m *Value) String() string { return proto.CompactTextString(m) }
func (*Value) ProtoMessage() {}
func (*Value) Descriptor() ([]byte, []int) {
return fileDescriptor_struct_3a5a94e0c7801b27, []int{1}
}
func (*Value) XXX_WellKnownType() string { return "Value" }
func (m *Value) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Value.Unmarshal(m, b)
}
func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Value.Marshal(b, m, deterministic)
}
func (dst *Value) XXX_Merge(src proto.Message) {
xxx_messageInfo_Value.Merge(dst, src)
}
func (m *Value) XXX_Size() int {
return xxx_messageInfo_Value.Size(m)
}
func (m *Value) XXX_DiscardUnknown() {
xxx_messageInfo_Value.DiscardUnknown(m)
}
var xxx_messageInfo_Value proto.InternalMessageInfo
type isValue_Kind interface {
isValue_Kind()
}
type Value_NullValue struct {
NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"`
}
type Value_NumberValue struct {
NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"`
}
type Value_StringValue struct {
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"`
}
type Value_BoolValue struct {
BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"`
}
type Value_StructValue struct {
StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"`
}
type Value_ListValue struct {
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"`
}
func (*Value_NullValue) isValue_Kind() {}
func (*Value_NumberValue) isValue_Kind() {}
func (*Value_StringValue) isValue_Kind() {}
func (*Value_BoolValue) isValue_Kind() {}
func (*Value_StructValue) isValue_Kind() {}
func (*Value_ListValue) isValue_Kind() {}
func (m *Value) GetKind() isValue_Kind {
if m != nil {
return m.Kind
}
return nil
}
func (m *Value) GetNullValue() NullValue {
if x, ok := m.GetKind().(*Value_NullValue); ok {
return x.NullValue
}
return NullValue_NULL_VALUE
}
func (m *Value) GetNumberValue() float64 {
if x, ok := m.GetKind().(*Value_NumberValue); ok {
return x.NumberValue
}
return 0
}
func (m *Value) GetStringValue() string {
if x, ok := m.GetKind().(*Value_StringValue); ok {
return x.StringValue
}
return ""
}
func (m *Value) GetBoolValue() bool {
if x, ok := m.GetKind().(*Value_BoolValue); ok {
return x.BoolValue
}
return false
}
func (m *Value) GetStructValue() *Struct {
if x, ok := m.GetKind().(*Value_StructValue); ok {
return x.StructValue
}
return nil
}
func (m *Value) GetListValue() *ListValue {
if x, ok := m.GetKind().(*Value_ListValue); ok {
return x.ListValue
}
return nil
}
// XXX_OneofFuncs is for the internal use of the proto package.
func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
(*Value_NullValue)(nil),
(*Value_NumberValue)(nil),
(*Value_StringValue)(nil),
(*Value_BoolValue)(nil),
(*Value_StructValue)(nil),
(*Value_ListValue)(nil),
}
}
func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
m := msg.(*Value)
// kind
switch x := m.Kind.(type) {
case *Value_NullValue:
b.EncodeVarint(1<<3 | proto.WireVarint)
b.EncodeVarint(uint64(x.NullValue))
case *Value_NumberValue:
b.EncodeVarint(2<<3 | proto.WireFixed64)
b.EncodeFixed64(math.Float64bits(x.NumberValue))
case *Value_StringValue:
b.EncodeVarint(3<<3 | proto.WireBytes)
b.EncodeStringBytes(x.StringValue)
case *Value_BoolValue:
t := uint64(0)
if x.BoolValue {
t = 1
}
b.EncodeVarint(4<<3 | proto.WireVarint)
b.EncodeVarint(t)
case *Value_StructValue:
b.EncodeVarint(5<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.StructValue); err != nil {
return err
}
case *Value_ListValue:
b.EncodeVarint(6<<3 | proto.WireBytes)
if err := b.EncodeMessage(x.ListValue); err != nil {
return err
}
case nil:
default:
return fmt.Errorf("Value.Kind has unexpected type %T", x)
}
return nil
}
func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
m := msg.(*Value)
switch tag {
case 1: // kind.null_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Kind = &Value_NullValue{NullValue(x)}
return true, err
case 2: // kind.number_value
if wire != proto.WireFixed64 {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeFixed64()
m.Kind = &Value_NumberValue{math.Float64frombits(x)}
return true, err
case 3: // kind.string_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeStringBytes()
m.Kind = &Value_StringValue{x}
return true, err
case 4: // kind.bool_value
if wire != proto.WireVarint {
return true, proto.ErrInternalBadWireType
}
x, err := b.DecodeVarint()
m.Kind = &Value_BoolValue{x != 0}
return true, err
case 5: // kind.struct_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(Struct)
err := b.DecodeMessage(msg)
m.Kind = &Value_StructValue{msg}
return true, err
case 6: // kind.list_value
if wire != proto.WireBytes {
return true, proto.ErrInternalBadWireType
}
msg := new(ListValue)
err := b.DecodeMessage(msg)
m.Kind = &Value_ListValue{msg}
return true, err
default:
return false, nil
}
}
func _Value_OneofSizer(msg proto.Message) (n int) {
m := msg.(*Value)
// kind
switch x := m.Kind.(type) {
case *Value_NullValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.NullValue))
case *Value_NumberValue:
n += 1 // tag and wire
n += 8
case *Value_StringValue:
n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.StringValue)))
n += len(x.StringValue)
case *Value_BoolValue:
n += 1 // tag and wire
n += 1
case *Value_StructValue:
s := proto.Size(x.StructValue)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *Value_ListValue:
s := proto.Size(x.ListValue)
n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
default:
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
}
return n
}
// `ListValue` is a wrapper around a repeated field of values.
//
// The JSON representation for `ListValue` is JSON array.
type ListValue struct {
// Repeated field of dynamically typed values.
Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListValue) Reset() { *m = ListValue{} }
func (m *ListValue) String() string { return proto.CompactTextString(m) }
func (*ListValue) ProtoMessage() {}
func (*ListValue) Descriptor() ([]byte, []int) {
return fileDescriptor_struct_3a5a94e0c7801b27, []int{2}
}
func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
func (m *ListValue) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListValue.Unmarshal(m, b)
}
func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
}
func (dst *ListValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListValue.Merge(dst, src)
}
func (m *ListValue) XXX_Size() int {
return xxx_messageInfo_ListValue.Size(m)
}
func (m *ListValue) XXX_DiscardUnknown() {
xxx_messageInfo_ListValue.DiscardUnknown(m)
}
var xxx_messageInfo_ListValue proto.InternalMessageInfo
func (m *ListValue) GetValues() []*Value {
if m != nil {
return m.Values
}
return nil
}
func init() {
proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
proto.RegisterType((*Value)(nil), "google.protobuf.Value")
proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
}
func init() {
proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27)
}
var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{
// 417 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
0x00,
}


@ -1,96 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
option java_package = "com.google.protobuf";
option java_outer_classname = "StructProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
// scripting languages like JS a struct is represented as an
// object. The details of that representation are described together
// with the proto support for the language.
//
// The JSON representation for `Struct` is JSON object.
message Struct {
// Unordered map of dynamically typed values.
map<string, Value> fields = 1;
}
// `Value` represents a dynamically typed value which can be either
// null, a number, a string, a boolean, a recursive struct value, or a
// list of values. A producer of value is expected to set one of these
// variants; absence of any variant indicates an error.
//
// The JSON representation for `Value` is JSON value.
message Value {
// The kind of value.
oneof kind {
// Represents a null value.
NullValue null_value = 1;
// Represents a double value.
double number_value = 2;
// Represents a string value.
string string_value = 3;
// Represents a boolean value.
bool bool_value = 4;
// Represents a structured value.
Struct struct_value = 5;
// Represents a repeated `Value`.
ListValue list_value = 6;
}
}
// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
//
// The JSON representation for `NullValue` is JSON `null`.
enum NullValue {
// Null value.
NULL_VALUE = 0;
}
// `ListValue` is a wrapper around a repeated field of values.
//
// The JSON representation for `ListValue` is JSON array.
message ListValue {
// Repeated field of dynamically typed values.
repeated Value values = 1;
}
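For reference, a minimal Go sketch (not part of the removed file) that builds the Struct/Value shapes described above using the vendored structpb package and its generated oneof wrappers:
package main

import (
    "fmt"

    structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
    // A Struct holding {"name": "gopher", "ready": true}.
    s := &structpb.Struct{
        Fields: map[string]*structpb.Value{
            "name":  {Kind: &structpb.Value_StringValue{StringValue: "gopher"}},
            "ready": {Kind: &structpb.Value_BoolValue{BoolValue: true}},
        },
    }
    fmt.Println(s.Fields["name"].GetStringValue()) // gopher
    fmt.Println(s.Fields["ready"].GetBoolValue())  // true
}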


@ -1,134 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package ptypes
// This file implements operations on google.protobuf.Timestamp.
import (
"errors"
"fmt"
"time"
tspb "github.com/golang/protobuf/ptypes/timestamp"
)
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *tspb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}
// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *tspb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
seconds := t.Unix()
nanos := int32(t.Sub(time.Unix(seconds, 0)))
ts := &tspb.Timestamp{
Seconds: seconds,
Nanos: nanos,
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *tspb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
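// Illustrative sketch, not part of the removed vendor tree: round-tripping a
// time.Time through the helpers above, assuming the package is imported from
// "github.com/golang/protobuf/ptypes" as it was vendored here.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	ts, err := ptypes.TimestampProto(time.Now()) // time.Time -> *tspb.Timestamp
	if err != nil {
		log.Fatal(err)
	}
	t, err := ptypes.Timestamp(ts) // *tspb.Timestamp -> time.Time in UTC
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ptypes.TimestampString(ts), t) // RFC 3339 string and the UTC time
}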

View File

@ -1,175 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Timestamp) Reset() { *m = Timestamp{} }
func (m *Timestamp) String() string { return proto.CompactTextString(m) }
func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) {
return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
}
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Timestamp.Unmarshal(m, b)
}
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
}
func (dst *Timestamp) XXX_Merge(src proto.Message) {
xxx_messageInfo_Timestamp.Merge(dst, src)
}
func (m *Timestamp) XXX_Size() int {
return xxx_messageInfo_Timestamp.Size(m)
}
func (m *Timestamp) XXX_DiscardUnknown() {
xxx_messageInfo_Timestamp.DiscardUnknown(m)
}
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *Timestamp) GetNanos() int32 {
if m != nil {
return m.Nanos
}
return 0
}
func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
func init() {
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
}
var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
}

View File

@ -1,133 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/timestamp";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// A Timestamp represents a point in time independent of any time zone
// or calendar, represented as seconds and fractions of seconds at
// nanosecond resolution in UTC Epoch time. It is encoded using the
// Proleptic Gregorian Calendar which extends the Gregorian calendar
// backwards to year one. It is encoded assuming all minutes are 60
// seconds long, i.e. leap seconds are "smeared" so that no leap second
// table is needed for interpretation. Range is from
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
// By restricting to that range, we ensure that we can convert to
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
// # Examples
//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
// timestamp.set_seconds(time(NULL));
// timestamp.set_nanos(0);
//
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
//
// struct timeval tv;
// gettimeofday(&tv, NULL);
//
// Timestamp timestamp;
// timestamp.set_seconds(tv.tv_sec);
// timestamp.set_nanos(tv.tv_usec * 1000);
//
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
//
// FILETIME ft;
// GetSystemTimeAsFileTime(&ft);
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
//
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
// Timestamp timestamp;
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
//
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
//
// long millis = System.currentTimeMillis();
//
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
// .setNanos((int) ((millis % 1000) * 1000000)).build();
//
//
// Example 5: Compute Timestamp from current time in Python.
//
// timestamp = Timestamp()
// timestamp.GetCurrentTime()
//
// # JSON Mapping
//
// In JSON format, the Timestamp type is encoded as a string in the
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
// where {year} is always expressed using four digits while {month}, {day},
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
// is required, though only UTC (as indicated by "Z") is presently supported.
//
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
// 01:30 UTC on January 15, 2017.
//
// In JavaScript, one can convert a Date object to this format using the
// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
// method. In Python, a standard `datetime.datetime` object can be converted
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
int64 seconds = 1;
// Non-negative fractions of a second at nanosecond resolution. Negative
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
int32 nanos = 2;
}
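// Illustrative sketch, not part of the removed vendor tree: the RFC 3339 JSON
// mapping described in the comments above, produced with the jsonpb marshaler.
// The "github.com/golang/protobuf/jsonpb" package is assumed to be available;
// it is not among the vendored packages removed in this diff.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/jsonpb"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

func main() {
	// 2017-01-15T01:30:15.01Z expressed as seconds and nanos since the Unix epoch
	ts := &tspb.Timestamp{Seconds: 1484443815, Nanos: 10000000}
	out, err := (&jsonpb.Marshaler{}).MarshalToString(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // prints the RFC 3339 string form, e.g. "2017-01-15T01:30:15.010Z"
}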

View File

@ -1,443 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/wrappers.proto
package wrappers // import "github.com/golang/protobuf/ptypes/wrappers"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// Wrapper message for `double`.
//
// The JSON representation for `DoubleValue` is JSON number.
type DoubleValue struct {
// The double value.
Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DoubleValue) Reset() { *m = DoubleValue{} }
func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
func (*DoubleValue) ProtoMessage() {}
func (*DoubleValue) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{0}
}
func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
}
func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
}
func (dst *DoubleValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_DoubleValue.Merge(dst, src)
}
func (m *DoubleValue) XXX_Size() int {
return xxx_messageInfo_DoubleValue.Size(m)
}
func (m *DoubleValue) XXX_DiscardUnknown() {
xxx_messageInfo_DoubleValue.DiscardUnknown(m)
}
var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
func (m *DoubleValue) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `float`.
//
// The JSON representation for `FloatValue` is JSON number.
type FloatValue struct {
// The float value.
Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *FloatValue) Reset() { *m = FloatValue{} }
func (m *FloatValue) String() string { return proto.CompactTextString(m) }
func (*FloatValue) ProtoMessage() {}
func (*FloatValue) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{1}
}
func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
func (m *FloatValue) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_FloatValue.Unmarshal(m, b)
}
func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
}
func (dst *FloatValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_FloatValue.Merge(dst, src)
}
func (m *FloatValue) XXX_Size() int {
return xxx_messageInfo_FloatValue.Size(m)
}
func (m *FloatValue) XXX_DiscardUnknown() {
xxx_messageInfo_FloatValue.DiscardUnknown(m)
}
var xxx_messageInfo_FloatValue proto.InternalMessageInfo
func (m *FloatValue) GetValue() float32 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `int64`.
//
// The JSON representation for `Int64Value` is JSON string.
type Int64Value struct {
// The int64 value.
Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Int64Value) Reset() { *m = Int64Value{} }
func (m *Int64Value) String() string { return proto.CompactTextString(m) }
func (*Int64Value) ProtoMessage() {}
func (*Int64Value) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{2}
}
func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
func (m *Int64Value) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Int64Value.Unmarshal(m, b)
}
func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
}
func (dst *Int64Value) XXX_Merge(src proto.Message) {
xxx_messageInfo_Int64Value.Merge(dst, src)
}
func (m *Int64Value) XXX_Size() int {
return xxx_messageInfo_Int64Value.Size(m)
}
func (m *Int64Value) XXX_DiscardUnknown() {
xxx_messageInfo_Int64Value.DiscardUnknown(m)
}
var xxx_messageInfo_Int64Value proto.InternalMessageInfo
func (m *Int64Value) GetValue() int64 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `uint64`.
//
// The JSON representation for `UInt64Value` is JSON string.
type UInt64Value struct {
// The uint64 value.
Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UInt64Value) Reset() { *m = UInt64Value{} }
func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
func (*UInt64Value) ProtoMessage() {}
func (*UInt64Value) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{3}
}
func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
}
func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
}
func (dst *UInt64Value) XXX_Merge(src proto.Message) {
xxx_messageInfo_UInt64Value.Merge(dst, src)
}
func (m *UInt64Value) XXX_Size() int {
return xxx_messageInfo_UInt64Value.Size(m)
}
func (m *UInt64Value) XXX_DiscardUnknown() {
xxx_messageInfo_UInt64Value.DiscardUnknown(m)
}
var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
func (m *UInt64Value) GetValue() uint64 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `int32`.
//
// The JSON representation for `Int32Value` is JSON number.
type Int32Value struct {
// The int32 value.
Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Int32Value) Reset() { *m = Int32Value{} }
func (m *Int32Value) String() string { return proto.CompactTextString(m) }
func (*Int32Value) ProtoMessage() {}
func (*Int32Value) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{4}
}
func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
func (m *Int32Value) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Int32Value.Unmarshal(m, b)
}
func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
}
func (dst *Int32Value) XXX_Merge(src proto.Message) {
xxx_messageInfo_Int32Value.Merge(dst, src)
}
func (m *Int32Value) XXX_Size() int {
return xxx_messageInfo_Int32Value.Size(m)
}
func (m *Int32Value) XXX_DiscardUnknown() {
xxx_messageInfo_Int32Value.DiscardUnknown(m)
}
var xxx_messageInfo_Int32Value proto.InternalMessageInfo
func (m *Int32Value) GetValue() int32 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `uint32`.
//
// The JSON representation for `UInt32Value` is JSON number.
type UInt32Value struct {
// The uint32 value.
Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *UInt32Value) Reset() { *m = UInt32Value{} }
func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
func (*UInt32Value) ProtoMessage() {}
func (*UInt32Value) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{5}
}
func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
}
func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
}
func (dst *UInt32Value) XXX_Merge(src proto.Message) {
xxx_messageInfo_UInt32Value.Merge(dst, src)
}
func (m *UInt32Value) XXX_Size() int {
return xxx_messageInfo_UInt32Value.Size(m)
}
func (m *UInt32Value) XXX_DiscardUnknown() {
xxx_messageInfo_UInt32Value.DiscardUnknown(m)
}
var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
func (m *UInt32Value) GetValue() uint32 {
if m != nil {
return m.Value
}
return 0
}
// Wrapper message for `bool`.
//
// The JSON representation for `BoolValue` is JSON `true` and `false`.
type BoolValue struct {
// The bool value.
Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BoolValue) Reset() { *m = BoolValue{} }
func (m *BoolValue) String() string { return proto.CompactTextString(m) }
func (*BoolValue) ProtoMessage() {}
func (*BoolValue) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{6}
}
func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
func (m *BoolValue) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BoolValue.Unmarshal(m, b)
}
func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
}
func (dst *BoolValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_BoolValue.Merge(dst, src)
}
func (m *BoolValue) XXX_Size() int {
return xxx_messageInfo_BoolValue.Size(m)
}
func (m *BoolValue) XXX_DiscardUnknown() {
xxx_messageInfo_BoolValue.DiscardUnknown(m)
}
var xxx_messageInfo_BoolValue proto.InternalMessageInfo
func (m *BoolValue) GetValue() bool {
if m != nil {
return m.Value
}
return false
}
// Wrapper message for `string`.
//
// The JSON representation for `StringValue` is JSON string.
type StringValue struct {
// The string value.
Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StringValue) Reset() { *m = StringValue{} }
func (m *StringValue) String() string { return proto.CompactTextString(m) }
func (*StringValue) ProtoMessage() {}
func (*StringValue) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{7}
}
func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
func (m *StringValue) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StringValue.Unmarshal(m, b)
}
func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
}
func (dst *StringValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_StringValue.Merge(dst, src)
}
func (m *StringValue) XXX_Size() int {
return xxx_messageInfo_StringValue.Size(m)
}
func (m *StringValue) XXX_DiscardUnknown() {
xxx_messageInfo_StringValue.DiscardUnknown(m)
}
var xxx_messageInfo_StringValue proto.InternalMessageInfo
func (m *StringValue) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
// Wrapper message for `bytes`.
//
// The JSON representation for `BytesValue` is JSON string.
type BytesValue struct {
// The bytes value.
Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BytesValue) Reset() { *m = BytesValue{} }
func (m *BytesValue) String() string { return proto.CompactTextString(m) }
func (*BytesValue) ProtoMessage() {}
func (*BytesValue) Descriptor() ([]byte, []int) {
return fileDescriptor_wrappers_16c7c35c009f3253, []int{8}
}
func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
func (m *BytesValue) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BytesValue.Unmarshal(m, b)
}
func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
}
func (dst *BytesValue) XXX_Merge(src proto.Message) {
xxx_messageInfo_BytesValue.Merge(dst, src)
}
func (m *BytesValue) XXX_Size() int {
return xxx_messageInfo_BytesValue.Size(m)
}
func (m *BytesValue) XXX_DiscardUnknown() {
xxx_messageInfo_BytesValue.DiscardUnknown(m)
}
var xxx_messageInfo_BytesValue proto.InternalMessageInfo
func (m *BytesValue) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func init() {
proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
}
func init() {
proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_wrappers_16c7c35c009f3253)
}
var fileDescriptor_wrappers_16c7c35c009f3253 = []byte{
// 259 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
0x01, 0x00, 0x00,
}

View File

@ -1,118 +0,0 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Wrappers for primitive (non-message) types. These types are useful
// for embedding primitives in the `google.protobuf.Any` type and for places
// where we need to distinguish between the absence of a primitive
// typed field and its default value.
syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/wrappers";
option java_package = "com.google.protobuf";
option java_outer_classname = "WrappersProto";
option java_multiple_files = true;
option objc_class_prefix = "GPB";
// Wrapper message for `double`.
//
// The JSON representation for `DoubleValue` is JSON number.
message DoubleValue {
// The double value.
double value = 1;
}
// Wrapper message for `float`.
//
// The JSON representation for `FloatValue` is JSON number.
message FloatValue {
// The float value.
float value = 1;
}
// Wrapper message for `int64`.
//
// The JSON representation for `Int64Value` is JSON string.
message Int64Value {
// The int64 value.
int64 value = 1;
}
// Wrapper message for `uint64`.
//
// The JSON representation for `UInt64Value` is JSON string.
message UInt64Value {
// The uint64 value.
uint64 value = 1;
}
// Wrapper message for `int32`.
//
// The JSON representation for `Int32Value` is JSON number.
message Int32Value {
// The int32 value.
int32 value = 1;
}
// Wrapper message for `uint32`.
//
// The JSON representation for `UInt32Value` is JSON number.
message UInt32Value {
// The uint32 value.
uint32 value = 1;
}
// Wrapper message for `bool`.
//
// The JSON representation for `BoolValue` is JSON `true` and `false`.
message BoolValue {
// The bool value.
bool value = 1;
}
// Wrapper message for `string`.
//
// The JSON representation for `StringValue` is JSON string.
message StringValue {
// The string value.
string value = 1;
}
// Wrapper message for `bytes`.
//
// The JSON representation for `BytesValue` is JSON string.
message BytesValue {
// The bytes value.
bytes value = 1;
}
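// Illustrative sketch, not part of the removed vendor tree: using the
// generated wrapper types (vendored above as
// "github.com/golang/protobuf/ptypes/wrappers") to tell an unset field apart
// from its zero value. The Config type here is a hypothetical stand-in for a
// generated message.
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/wrappers"
)

// Config stands in for a generated message with an optional retry count:
// a nil pointer means "not set", &wrappers.Int32Value{Value: 0} means
// "explicitly zero".
type Config struct {
	Retries *wrappers.Int32Value
}

func retries(c Config, def int32) int32 {
	if c.Retries == nil {
		return def // field absent: fall back to the default
	}
	return c.Retries.GetValue() // field present, possibly zero
}

func main() {
	fmt.Println(retries(Config{}, 3))                                        // 3
	fmt.Println(retries(Config{Retries: &wrappers.Int32Value{Value: 0}}, 3)) // 0
}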

View File

@ -1 +0,0 @@
*.cover

View File

@ -1,15 +0,0 @@
sudo: false
language: go
go:
- 1.6
- 1.7
before_install:
- go get golang.org/x/tools/cmd/cover
- go get golang.org/x/tools/cmd/goimports
script:
- gofmt -l .
- goimports -l .
- go tool vet .
- go test -coverprofile=coverage.txt -covermode=atomic
after_success:
- bash <(curl -s https://codecov.io/bash)

View File

@ -1,27 +0,0 @@
Want to contribute? Great! First, read this page (including the small print at the end).
### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://cla.developers.google.com/about/google-individual)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.
### Code reviews
All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose.
### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the
[Software Grant and Corporate Contributor License Agreement](https://cla.developers.google.com/about/google-corporate).

Some files were not shown because too many files have changed in this diff.